hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a74329acd2a79952cdd19e303436d84ca07dcf3c
| 185,596
|
py
|
Python
|
pyNastran/op2/tables/oef_forces/oef_complex_force_objects.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/op2/tables/oef_forces/oef_complex_force_objects.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/op2/tables/oef_forces/oef_complex_force_objects.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | null | null | null |
from itertools import cycle
import numpy as np
from numpy import zeros, searchsorted, allclose
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.op2.result_objects.op2_objects import BaseElement
from pyNastran.op2.tables.oef_forces.oef_force_objects import ForceObject
from pyNastran.f06.f06_formatting import write_imag_floats_13e, write_float_12e # get_key0,
from pyNastran.f06.f06_formatting import _eigenvalue_header
class ComplexForceObject(ForceObject):
def __init__(self, data_code, isubcase, apply_data_code=True):
ForceObject.__init__(self, data_code, isubcase, apply_data_code=apply_data_code)
@property
def is_real(self):
return False
@property
def is_complex(self):
return True
class ComplexRodForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
if is_sort1:
pass
else:
raise NotImplementedError('SORT2')
def get_headers(self):
headers = ['axial_force', 'torque']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexRodForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[axial_force, torque]
self.data = zeros((self.ntimes, self.ntotal, 2), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(column_values, column_names,
headers, self.element, self.data)
#data_frame = pd.Panel(self.data, items=column_values,
#major_axis=self.element, minor_axis=headers).to_frame()
#data_frame.columns.names = column_names
#data_frame.index.names = ['ElementID', 'Item']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(axial1, torque1) = t1
(axial2, torque2) = t2
if not allclose(t1, t2):
msg += '(%s) (%s, %s) (%s, %s)\n' % (
eid,
axial1, torque1,
axial2, torque2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, axial, torque):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [axial, torque]
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
if self.element_type == 1: # CROD
msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C R O D )\n']
elif self.element_type == 10: # CONROD
msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C O N R O D )\n']
elif self.element_type == 3: # CTUBE
msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C T U B E )\n']
#pass
else:
raise NotImplementedError('element_name=%s element_type=%s' % (self.element_name, self.element_type))
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n']
else:
msg += [' (REAL/IMAGINARY)\n']
if is_sort1:
msg += [
' \n'
' ELEMENT AXIAL TORSIONAL\n'
' ID. STRAIN STRAIN\n'
]
#' 14 0.0 / 0.0 0.0 / 0.0'
else:
raise NotImplementedError('sort2')
return self.element_name, msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
(elem_name, msg_temp) = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
#is_odd = False
#nwrite = len(eids)
#if len(eids) % 2 == 1:
#nwrite -= 1
#is_odd = True
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
for eid, axiali, torsioni in zip(eids, axial, torsion):
out = write_imag_floats_13e([axiali, torsioni], is_mag_phase)
[raxial, rtorsion, iaxial, itorsion] = out
#ELEMENT AXIAL TORSIONAL
#ID. STRESS STRESS
#14 0.0 / 0.0 0.0 / 0.0
f06_file.write(' %8i %-13s / %-13s %-13s / %s\n' % (eid, raxial, iaxial, rtorsion, itorsion))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result, date,
is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
if self.is_sort1:
struct1 = Struct(endian + b'i4f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
for eid_device, axiali, torsioni in zip(eids_device, axial, torsion):
data = [eid_device, axiali.real, torsioni.real, axiali.imag, torsioni.imag]
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, tuple(data)))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCShearForceArray(BaseElement):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
BaseElement.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
@property
def is_real(self):
return False
@property
def is_complex(self):
return True
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
headers = [
'force41', 'force14', 'force21', 'force12', 'force32', 'force23',
'force43', 'force34', 'kickForce1', 'kickForce2', 'kickForce3',
'kickForce4', 'shear12', 'shear23', 'shear34', 'shear41'
]
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexCShearForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[force41, force14, force21, force12, force32, force23, force43, force34,
#kick_force1, kick_force2, kick_force3, kick_force4,
#shear12, shear23, shear34, shear41]
self.data = zeros((self.ntimes, self.ntotal, 16), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Mode 1 2
#EigenvalueReal -0.0 -0.0
#EigenvalueImag -0.0 -0.0
#Damping 0.0 0.0
#ElementID Item
#22 force41 2.927977e-10+0.000000e+00j 0.000000+0.000000j
# force14 2.927977e-10+5.855954e-10j 0.000000+0.000000j
# force21 -2.927977e-10+0.000000e+00j 0.000000+0.000000j
# force12 -2.927977e-10+5.855954e-10j 0.000000+0.000000j
# force32 2.927977e-10+0.000000e+00j 0.000000+0.000000j
# force23 2.927977e-10+5.855954e-10j 0.000000+0.000000j
# force43 -2.927977e-10+0.000000e+00j 0.000000+0.000000j
# force34 -2.927977e-10+5.855954e-10j 0.000000+0.000000j
# kickForce1 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# kickForce2 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# kickForce3 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# kickForce4 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# shear12 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# shear23 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# shear34 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# shear41 0.000000e+00+0.000000e+00j 0.000000+0.000000j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(force41a, force14a, force21a, force12a, force32a, force23a, force43a, force34a,
kick_force1a, kick_force2a, kick_force3a, kick_force4a,
shear12a, shear23a, shear34a, shear41a) = t1
(force41b, force14b, force21b, force12b, force32b, force23b, force43b, force34b,
kick_force1b, kick_force2b, kick_force3b, kick_force4b,
shear12b, shear23b, shear34b, shear41b) = t2
if not allclose(t1, t2):
msg += (
'%s (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n'
' (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
force41a, force14a, force21a, force12a, force32a, force23a,
force43a, force34a, kick_force1a, kick_force2a, kick_force3a,
kick_force4a, shear12a, shear23a, shear34a, shear41a,
force41b, force14b, force21b, force12b, force32b, force23b,
force43b, force34b, kick_force1b, kick_force2b, kick_force3b,
kick_force4b, shear12b, shear23b, shear34b, shear41b
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid,
force41, force14, force21, force12, force32, force23, force43, force34,
kick_force1, kick_force2, kick_force3, kick_force4,
shear12, shear23, shear34, shear41):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [
force41, force14, force21, force12, force32, force23, force43, force34,
kick_force1, kick_force2, kick_force3, kick_force4,
shear12, shear23, shear34, shear41]
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
msg = [' C O M P L E X F O R C E S A C T I N G O N S H E A R P A N E L E L E M E N T S (CSHEAR)\n']
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n \n']
else:
msg += [' (REAL/IMAGINARY)\n \n']
if is_sort1:
msg += [
' ====== POINT 1 ====== ====== POINT 2 ====== ====== POINT 3 ====== ====== POINT 4 ======\n'
' ELEMENT F-FROM-4 F-FROM-2 F-FROM-1 F-FROM-3 F-FROM-2 F-FROM-4 F-FROM-3 F-FROM-1\n'
' ID KICK-1 SHEAR-12 KICK-2 SHEAR-23 KICK-3 SHEAR-34 KICK-4 SHEAR-41\n'
]
else:
raise NotImplementedError('sort2')
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
## TODO: I'm sure this ordering is wrong...
force41 = self.data[itime, :, 0]
force14 = self.data[itime, :, 1]
force21 = self.data[itime, :, 2] # TODO: this is wrong...
force12 = self.data[itime, :, 3]
force32 = self.data[itime, :, 4]
force23 = self.data[itime, :, 5]
force43 = self.data[itime, :, 6]
force34 = self.data[itime, :, 7]
kick_force1 = self.data[itime, :, 8]
kick_force2 = self.data[itime, :, 9]
kick_force3 = self.data[itime, :, 10]
kick_force4 = self.data[itime, :, 11]
shear12 = self.data[itime, :, 12]
shear23 = self.data[itime, :, 13]
shear34 = self.data[itime, :, 14]
shear41 = self.data[itime, :, 15]
assert len(force12) > 0, force12
for (eid, iforce41, force14i, iforce21, iforce12, iforce32, iforce23, iforce43, iforce34,
ikick_force1, ikick_force2, ikick_force3, ikick_force4,
ishear12, ishear23, ishear34, ishear41) in zip(
eids, force41, force14, force21, force12, force32, force23, force43, force34,
kick_force1, kick_force2, kick_force3, kick_force4,
shear12, shear23, shear34, shear41):
vals2 = write_imag_floats_13e([
iforce41, force14i, iforce21, iforce12, iforce32, iforce23, iforce43, iforce34,
ikick_force1, ikick_force2, ikick_force3, ikick_force4,
ishear12, ishear23, ishear34, ishear41], is_mag_phase)
[
force41r, force14r, force21i, force12r, force32r, force23r, force43r, force34r,
kick_force1r, kick_force2r, kick_force3r, kick_force4r,
shear12r, shear23r, shear34r, shear41r,
force41i, force14i, force21i, force12i, force32i, force23i, force43i, force34i,
kick_force1i, kick_force2i, kick_force3i, kick_force4i,
shear12i, shear23i, shear34i, shear41i
] = vals2
#complex_cshear_force_f06
#' ====== POINT 1 ====== ====== POINT 2 ====== ====== POINT 3 ====== ====== POINT 4 ======'
#' ELEMENT F-FROM-4 F-FROM-2 F-FROM-1 F-FROM-3 F-FROM-2 F-FROM-4 F-FROM-3 F-FROM-1'
#' ID KICK-1 SHEAR-12 KICK-2 SHEAR-23 KICK-3 SHEAR-34 KICK-4 SHEAR-41'
#' 25 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
f06_file.write(
' %8i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'% (
eid,
force41r, force14r, force21i, force12r, force32r, force23r, force43r, force34r,
kick_force1r, kick_force2r, kick_force3r, kick_force4r,
shear12r, shear23r, shear34r, shear41r,
force41i, force14i, force21i, force12i, force32i, force23i, force43i, force34i,
kick_force1i, kick_force2i, kick_force3i, kick_force4i,
shear12i, shear23i, shear34i, shear41i
))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class ComplexSpringDamperForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
def get_headers(self):
headers = ['spring_force']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexSpringDamperForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[axial_force, torque]
self.data = zeros((self.ntimes, self.ntotal, 1), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Mode 1 2
#EigenvalueReal -0.0 -0.0
#EigenvalueImag -0.0 -0.0
#Damping 0.0 0.0
#ElementID Item
#30 spring_force 0.000000+0.000000j 0.000000+0.000000j
#31 spring_force 0.000000+0.000000j 0.000000+0.000000j
#32 spring_force 0.000000+0.000000j 0.000000+0.000000j
#33 spring_force 0.000000+0.000000j 0.000000+0.000000j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, 0]
t2 = table.data[itime, ie, 0]
if not allclose([t1.real, t1.imag], [t2.real, t2.imag], atol=0.0001):
msg += '%s (%s, %s) (%s, %s)\n' % (
eid,
t1.real, t1.imag,
t2.real, t2.imag)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, force):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, 0] = force
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
# 11-CELAS1, 12-CELAS2, 13-CELAS3, 14-CELAS4
if self.element_type == 11:
msg = [' C O M P L E X F O R C E S I N S C A L A R S P R I N G S ( C E L A S 1 )\n']
elif self.element_type == 12:
msg = [' C O M P L E X F O R C E S I N S C A L A R S P R I N G S ( C E L A S 2 )\n']
elif self.element_type == 13:
msg = [' C O M P L E X F O R C E S I N S C A L A R S P R I N G S ( C E L A S 3 )\n']
elif self.element_type == 14:
msg = [' C O M P L E X F O R C E S I N S C A L A R S P R I N G S ( C E L A S 4 )\n']
elif self.element_type == 20: # CDAMP1
msg = [' C O M P L E X F O R C E S I N S C A L A R D A M P E R S ( C D A M P 1 )\n']
elif self.element_type == 21: # CDAMP2
msg = [' C O M P L E X F O R C E S I N S C A L A R D A M P E R S ( C D A M P 2 )\n']
elif self.element_type == 22: # CDAMP3
msg = [' C O M P L E X F O R C E S I N S C A L A R D A M P E R S ( C D A M P 3 )\n']
elif self.element_type == 23: # CDAMP4
msg = [' C O M P L E X F O R C E S I N S C A L A R D A M P E R S ( C D A M P 4 )\n']
else:
raise NotImplementedError('element_name=%s element_type=%s' % (self.element_name, self.element_type))
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n \n']
else:
msg += [' (REAL/IMAGINARY)\n \n']
if is_sort1:
msg += [
' ELEMENT ELEMENT\n'
' ID. FORCE ID. FORCE\n'
]
#' 14 0.0 / 0.0 0.0 / 0.0'
else:
msg += [' FREQUENCY FORCE FREQUENCY FORCE\n']
return msg
#def get_element_index(self, eids):
## elements are always sorted; nodes are not
#itot = searchsorted(eids, self.element) #[0]
#return itot
#def eid_to_element_node_index(self, eids):
##ind = ravel([searchsorted(self.element == eid) for eid in eids])
#ind = searchsorted(eids, self.element)
##ind = ind.reshape(ind.size)
##ind.sort()
#return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
#is_odd = False
#nwrite = len(eids)
#if len(eids) % 2 == 1:
#nwrite -= 1
#is_odd = True
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
spring_force = self.data[itime, :, 0]
for eid, spring_forcei in zip(eids, spring_force):
[rspring, ispring] = write_imag_floats_13e([spring_forcei], is_mag_phase)
#ELEMENT AXIAL TORSIONAL
#ID. STRESS STRESS
#14 0.0 / 0.0 0.0 / 0.0
f06_file.write(' %8i %-13s / %-13s\n' % (eid, rspring, ispring))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i2f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
force = self.data[itime, :, 0]
for eid, forcei in zip(eids_device, force):
data = [eid, forcei.real, forcei.imag]
op2_ascii.write(' eid=%s force=%s\n' % (eid, forcei))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexSpringForceArray(ComplexSpringDamperForceArray): # 11-CELAS1,12-CELAS2,13-CELAS3, 14-CELAS4
def __init__(self, data_code, is_sort1, isubcase, dt):
ComplexSpringDamperForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
class ComplexDamperForceArray(ComplexSpringDamperForceArray):
def __init__(self, data_code, is_sort1, isubcase, dt):
ComplexSpringDamperForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
class ComplexViscForceArray(BaseElement):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
BaseElement.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
if is_sort1:
pass
else:
raise NotImplementedError('SORT2')
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
headers = ['axial_force', 'torque']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexViscForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[axial_force, torque]
self.data = zeros((self.ntimes, self.ntotal, 2), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Mode 1 2 3 4
#EigenvalueReal -0.0 -0.0 -0.0 -0.0
#EigenvalueImag -0.0 -0.0 -0.0 -0.0
#Damping 0.0 0.0 0.0 0.0
#ElementID Item
#50 axial_force (-0+0j) (-0+0j) (-0+0j) (-0+0j)
# torque (-0+0j) (-0+0j) (-0+0j) (-0+0j)
#51 axial_force (-0+0j) (-0+0j) (-0+0j) (-0+0j)
# torque 0j (-0+0j) (-0+0j) (-0+0j)
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(column_values, column_names,
headers, self.element, self.data)
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(axial1, torque1) = t1
(axial2, torque2) = t2
if not allclose(t1, t2):
msg += '(%s) (%s, %s) (%s, %s)\n' % (
eid,
axial1, torque1,
axial2, torque2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, axial, torque):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [axial, torque]
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
#if self.element_type == 1: # CROD
#msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C R O D )\n']
#elif self.element_type == 10: # CONROD
#msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C O N R O D )\n']
#elif self.element_type == 3: # CTUBE
#msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C T U B E )\n']
##pass
if self.element_type == 24:
msg = [' C O M P L E X F O R C E S I N V I S C E L E M E N T S ( C V I S C )\n']
else:
raise NotImplementedError('element_name=%s element_type=%s' % (self.element_name, self.element_type))
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n']
else:
msg += [' (REAL/IMAGINARY)\n']
if is_sort1:
msg += [
' \n'
' ELEMENT AXIAL TORSIONAL\n'
' ID. STRAIN STRAIN\n'
]
#' 14 0.0 / 0.0 0.0 / 0.0'
else:
raise NotImplementedError('sort2')
return self.element_name, msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
(elem_name, msg_temp) = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
#is_odd = False
#nwrite = len(eids)
#if len(eids) % 2 == 1:
#nwrite -= 1
#is_odd = True
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
for eid, axiali, torsioni in zip(eids, axial, torsion):
out = write_imag_floats_13e([axiali, torsioni], is_mag_phase)
[raxial, rtorsion, iaxial, itorsion] = out
#ELEMENT AXIAL TORSIONAL
#ID. STRESS STRESS
#14 0.0 / 0.0 0.0 / 0.0
f06_file.write(' %8i %-13s / %-13s %-13s / %s\n' %
(eid, raxial, iaxial, rtorsion, itorsion))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class ComplexPlateForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
def get_headers(self):
headers = ['mx', 'my', 'mxy', 'bmx', 'bmy', 'bmxy', 'tx', 'ty']
return headers
def build(self):
"""sizes the vectorized attributes of the ComplexPlateForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.data = zeros((self.ntimes, self.ntotal, 8), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
# Freq 0.00001 10.00000 20.00000 30.00000 40.00000 50.00000 60.00000
# ElementID Item
#8 mx 0j 0j 0j 0j (-361.6303-680.04156j) 0j 0j
# my 0j 0j 0j 0j (-7884.6196-14826.936j) 0j 0j
# mxy 0j 0j 0j 0j (-237.5723-446.7519j) 0j 0j
# bmx 0j 0j 0j 0j (5.514431+10.3698225j) 0j 0j
# bmy 0j 0j 0j 0j (10.107019+19.00613j) 0j 0j
# bmxy 0j 0j 0j 0j (-16.361727-30.768036j) 0j 0j
# tx 0j 0j 0j 0j (18.819313+35.3895j) 0j 0j
# ty 0j 0j 0j 0j (-61.55238-115.74853j) 0j 0j
#9 mx 0j 0j 0j 0j (1086.9078+2043.9175j) 0j 0j
# my 0j 0j 0j 0j (8089.895+15212.953j) 0j 0j
# mxy 0j 0j 0j 0j (-4725.3286-8885.925j) 0j 0j
# bmx 0j 0j 0j 0j (-3.9810739-7.486363j) 0j 0j
# bmy 0j 0j 0j 0j (-10.283798-19.338562j) 0j 0j
# bmxy 0j 0j 0j 0j (-8.663734-16.292051j) 0j 0j
# tx 0j 0j 0j 0j (54.14508+101.81919j) 0j 0j
# ty 0j 0j 0j 0j (-61.92162-116.44288j) 0j 0j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(column_values, column_names,
headers, self.element, self.data)
#data_frame = pd.Panel(self.data, items=column_values,
#major_axis=self.element, minor_axis=headers).to_frame()
#data_frame.columns.names = column_names
#data_frame.index.names = ['ElementID', 'Item']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(mx1, my1, mxy1, bmx1, bmy1, bmxy1, tx1, ty1) = t1
(mx2, my2, mxy2, bmx2, bmy2, bmxy2, tx2, ty2) = t2
if not allclose(t1, t2):
#if not np.array_equal(t1.real, t2.real):
msg += ('%-8s (%s, %s, %s, %s, %s, %s, %s, %s)\n'
'%-8s (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
#mx1.real, my1.real, mxy1.real, bmx1.real, bmy1.real,
#bmxy1.real, tx1.real, ty1.real,
mx1, my1, mxy1, bmx1, bmy1, bmxy1, tx1, ty1,
'',
mx2, my2, mxy2, bmx2, bmy2, bmxy2, tx2, ty2,
#mx2.real, my2.real, mxy2.real, bmx2.real, bmy2.real,
#bmxy2.real, tx2.real, ty2.real,
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.ielement += 1
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
loads = [' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID FX FY FXY MX MY MXY QX QY\n',]
if is_mag_phase:
mag_real = [' (MAGNITUDE/PHASE)\n \n']
else:
mag_real = [' (REAL/IMAGINARY)\n \n']
cquad4_bilinear = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'] # good
cquad4_linear = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'] # good
ctria3 = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 3 )\n'] # good
cquad8 = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 8 )\n']
cquadr = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D R )\n']
ctria6 = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 6 )\n']
ctriar = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A R )\n']
#is_bilinear = False
if self.element_type == 144: # CQUAD4
msg = cquad4_linear + mag_real + loads
elif self.element_type == 33: # CQUAD4
msg = cquad4_bilinear + mag_real + loads
elif self.element_type == 64: #CQUAD8
msg = cquad8 + mag_real + loads
elif self.element_type == 82: # CQUADR
msg = cquadr + mag_real + loads
elif self.element_type == 74: # CTRIA3
msg = ctria3 + mag_real + loads
elif self.element_type == 75: # CTRIA6
msg = ctria6 + mag_real + loads
elif self.element_type == 70: # CTRIAR
msg = ctriar + mag_real + loads
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
return msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
for eid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
out = write_imag_floats_13e([mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi], is_mag_phase)
[smxr, smyr, smxyr, sbmxr, sbmyr, sbmxyr, stxr, styr,
smxi, smyi, smxyi, sbmxi, sbmyi, sbmxyi, stxi, styi] = out
#"""
#ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -
#ID FX FY FXY MX MY MXY QX QY
#0 564 1.543439E+03 7.311177E+02 1.322702E+02 1.080178E+00 1.699104E+00 2.618547E-01 3.877034E+01 4.518554E+00
#358.3129 358.0245 177.5593 177.5292 178.2112 0.0907 358.1465 179.4567
#"""
# fx fy fxy mx my mxy qx qy
f06_file.write(
'0 %8i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, smxr, smyr, smxyr, sbmxr, sbmyr, sbmxyr, stxr, styr,
'', smxi, smyi, smxyi, sbmxi, sbmyi, sbmxyi, stxi, styi))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
eids = self.element
eids_device = eids * 10 + self.device_code
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i 16f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
for eid_device, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids_device, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
data = [eid_device,
mxi.real, myi.real, mxyi.real, bmxi.real, bmyi.real, bmxyi.real, txi.real, tyi.real,
mxi.imag, myi.imag, mxyi.imag, bmxi.imag, bmyi.imag, bmxyi.imag, txi.imag, tyi.imag]
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexPlate2ForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
def get_headers(self):
headers = ['mx', 'my', 'mxy', 'bmx', 'bmy', 'bmxy', 'tx', 'ty']
return headers
def build(self):
"""sizes the vectorized attributes of the ComplexPlate2ForceArray"""
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
self.element_node = zeros((self.ntotal, 2), dtype='int32')
#[mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.data = zeros((self.ntimes, self.ntotal, 8), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
assert 0 not in self.element
#print(self.element_node)
element_node = [self.element_node[:, 0], self.element_node[:, 1]]
assert 0 not in self.element_node[:, 0]
if self.nonlinear_factor not in (None, np.nan):
# Freq 0.00001 10.00000 20.00000 30.00000 40.00000 50.00000 60.00000
# ElementID NodeID Item
# 6 0 mx 0j 0j 0j 0j (-705.7376-1327.1312j) 0j 0j
# my 0j 0j 0j 0j (7404.8853+13924.8j) 0j 0j
# mxy 0j 0j 0j 0j (-101.319756-190.53061j) 0j 0j
# bmx 0j 0j 0j 0j (3.0701134+5.7733126j) 0j 0j
# bmy 0j 0j 0j 0j (98.75731+185.71196j) 0j 0j
# bmxy 0j 0j 0j 0j (0.25202343+0.4739271j) 0j 0j
# tx 0j 0j 0j 0j (14.426779+27.129389j) 0j 0j
# ty 0j 0j 0j 0j (-199.6823-375.5002j) 0j 0j
# 4 mx 0j 0j 0j 0j (-2934.639-5518.5537j) 0j 0j
# my 0j 0j 0j 0j (7516.2485+14134.217j) 0j 0j
# mxy 0j 0j 0j 0j (-101.319756-190.53061j) 0j 0j
# bmx 0j 0j 0j 0j (-19.69526-37.036705j) 0j 0j
# bmy 0j 0j 0j 0j (100.64615+189.2639j) 0j 0j
# bmxy 0j 0j 0j 0j (0.25202343+0.4739271j) 0j 0j
# tx 0j 0j 0j 0j (14.426779+27.129389j) 0j 0j
# ty 0j 0j 0j 0j (-199.6823-375.5002j) 0j 0j
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_element_node(
column_values, column_names,
headers, self.element_node, self.data)
#data_frame = pd.Panel(self.data, items=column_values,
#major_axis=element_node, minor_axis=headers).to_frame()
#data_frame.columns.names = column_names
#data_frame.index.names = ['ElementID', 'NodeID', 'Item']
else:
data_frame = pd.Panel(self.data,
major_axis=element_node, minor_axis=headers).to_frame()
data_frame.columns.names = ['Static']
data_frame.index.names = ['ElementID', 'NodeID', 'Item']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, e in enumerate(self.element_node):
(eid, nid) = e
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(mx1, my1, mxy1, bmx1, bmy1, bmxy1, tx1, ty1) = t1
(mx2, my2, mxy2, bmx2, bmy2, bmxy2, tx2, ty2) = t2
if not allclose(t1, t2):
base1 = '(%s, %s) ' % (eid, nid)
base2 = ' ' * len(base1)
msg += (
'%s (%s, %s, %s, %s, %s, %s, %s, %s)\n'
'%s(%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
base1,
mx1, my1, mxy1, bmx1, bmy1, bmxy1, tx1, ty1,
base2,
mx2, my2, mxy2, bmx2, bmy2, bmxy2, tx2, ty2))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
if i > 0:
raise ValueError(msg)
return True
def add_new_element_sort1(self, dt, eid, term, nid, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.element_node[self.itotal, :] = [eid, nid]
self.data[self.itime, self.itotal, :] = [mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.itotal += 1
self.ielement += 1
def add_sort1(self, dt, eid, nid, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
#assert self.element[self.ielement - 1] == eid, eid
self.element_node[self.itotal, :] = [eid, nid]
self.data[self.itime, self.itotal, :] = [mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
loads = [
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID FX FY FXY MX MY MXY QX QY\n',]
if is_mag_phase:
mag_real = [' (MAGNITUDE/PHASE)\n \n']
else:
mag_real = [' (REAL/IMAGINARY)\n \n']
cquad4_bilinear = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'] # good
cquad4_linear = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'] # good
ctria3 = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 3 )\n'] # good
cquad8 = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 8 )\n']
cquadr = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D R )\n']
ctria6 = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 6 )\n']
ctriar = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A R )\n']
#is_bilinear = False
if self.element_type == 144: # CQUAD4
msg = cquad4_linear + mag_real + loads
elif self.element_type == 33: # CQUAD4
msg = cquad4_bilinear + mag_real + loads
elif self.element_type == 64: #CQUAD8
msg = cquad8 + mag_real + loads
elif self.element_type == 82: # CQUADR
msg = cquadr + mag_real + loads
elif self.element_type == 74: # CTRIA3
msg = ctria3 + mag_real + loads
elif self.element_type == 75: # CTRIA6
msg = ctria6 + mag_real + loads
elif self.element_type == 70: # CTRIAR
msg = ctriar + mag_real + loads
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
return msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
for eid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
out = write_imag_floats_13e([mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi], is_mag_phase)
[smxr, smyr, smxyr, sbmxr, sbmyr, sbmxyr, stxr, styr,
smxi, smyi, smxyi, sbmxi, sbmyi, sbmxyi, stxi, styi] = out
#"""
#ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -
#ID FX FY FXY MX MY MXY QX QY
#0 564 1.543439E+03 7.311177E+02 1.322702E+02 1.080178E+00 1.699104E+00 2.618547E-01 3.877034E+01 4.518554E+00
#358.3129 358.0245 177.5593 177.5292 178.2112 0.0907 358.1465 179.4567
#"""
# fx fy fxy mx my mxy qx qy
f06_file.write(
'0 %8i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, smxr, smyr, smxyr, sbmxr, sbmyr, sbmxyr, stxr, styr,
'', smxi, smyi, smxyi, sbmxi, sbmyi, sbmxyi, stxi, styi))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
#eids = self.element
eids_device = self.element * 10 + self.device_code
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = len(self.element)
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
nnodes_all = 5
numwide_imag = 2 + nnodes_all * 17
assert ntotali == numwide_imag
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i 4s i 16f')
struct2 = Struct(endian + b'i 16f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
nwide = 0
ielement = -1
for eid, nid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids, nids, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
if nid == 0:
ielement += 1
eid_device = eids_device[ielement]
data = [eid_device, b'CEN/', nid,
mxi.real, myi.real, mxyi.real, bmxi.real, bmyi.real, bmxyi.real, txi.real, tyi.real,
mxi.imag, myi.imag, mxyi.imag, bmxi.imag, bmyi.imag, bmxyi.imag, txi.imag, tyi.imag]
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
op2.write(struct1.pack(*data))
else:
data = [nid,
mxi.real, myi.real, mxyi.real, bmxi.real, bmyi.real, bmxyi.real, txi.real, tyi.real,
mxi.imag, myi.imag, mxyi.imag, bmxi.imag, bmyi.imag, bmxyi.imag, txi.imag, tyi.imag]
op2_ascii.write(' data=%s\n' % (str(data)))
op2.write(struct2.pack(*data))
nwide += len(data)
assert nwide == ntotal, 'nwide=%s ntotal=%s' % (nwide, ntotal)
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCBarForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
self.result_flag = 0
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.itime = 0
self.nelements = 0 # result specific
#self.element_type = 'CBAR'
#self.cid = {} # gridGauss
#if is_sort1:
##sort1
#pass
#else:
#raise NotImplementedError('SORT2')
def get_headers(self):
headers = ['bending_moment_1a', 'bending_moment_2a',
'bending_moment_1b', 'bending_moment_2b',
'shear1', 'shear2', 'axial', 'torque', ]
return headers
def build(self):
"""sizes the vectorized attributes of the ComplexCBarForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (
#self.ntimes, self.nelements, self.ntotal, self.subtitle))
if self.is_built:
return
nnodes = 1
#self.names = []
#self.nelements //= nnodes
self.nelements //= self.ntimes
#self.ntotal //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
self.is_built = True
#print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
self._times = zeros(self.ntimes, 'float32')
self.element = zeros(self.ntotal, 'int32')
# the number is messed up because of the offset for the element's properties
if not self.nelements * nnodes == self.ntotal:
msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (
self.ntimes, self.nelements, nnodes, self.nelements * nnodes, self.ntotal)
raise RuntimeError(msg)
#[bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq]
self.data = zeros((self.ntimes, self.ntotal, 8), 'complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(column_values, column_names,
headers, self.element, self.data)
#self.data_frame = pd.Panel(self.data, items=column_values,
#major_axis=self.element, minor_axis=headers).to_frame()
#self.data_frame.columns.names = column_names
#self.data_frame.index.names = ['ElementID', 'Item']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
(s1a1, s2a1, s3a1, s4a1, axial1, s2a1, s2b1, s2c1, s2d1) = t1
(s1a2, s2a2, s3a2, s4a2, axial2, s2a2, s2b2, s2c2, s2d2) = t2
#d = t1 - t2
if not allclose([s1a1.real, s2a1.real, s3a1.real, s4a1.real, axial1.real, s2a1.real, s2b1.real, s2c1.real, s2d1.real],
[s1a2.real, s2a2.real, s3a2.real, s4a2.real, axial2.real, s2a2.real, s2b2.real, s2c2.real, s2d2.real], atol=0.0001):
#if not np.array_equal(t1, t2):
msg += '%-4s (%s, %s, %s, %s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
s1a1.real, s2a1.real, s3a1.real, s4a1.real, axial1.real, s2a1.real, s2b1.real, s2c1.real, s2d1.real,
s1a2.real, s2a2.real, s3a2.real, s4a2.real, axial2.real, s2a2.real, s2b2.real, s2c2.real, s2d2.real,
)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.data[self.itime, self.itotal, :] = [bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq]
self.element[self.itotal] = eid
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
else:
msg.append(' type=%s nelements=%i; table_name=%r\n' % (
self.__class__.__name__, nelements, self.table_name))
msg.append(' eType, cid\n')
msg.append(' data: [ntimes, nelements, 8] where 8=[%s]\n' % str(', '.join(self.get_headers())))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' is_sort1=%s is_sort2=%s\n' % (self.is_sort1, self.is_sort2))
msg.append(' CBAR\n')
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#msg_temp, nnodes = get_f06_header(self, is_mag_phase, is_sort1)
#is_sort1 = False
if is_mag_phase:
mag_phase = ' (MAGNITUDE/PHASE)\n \n'
else:
mag_phase = ' (REAL/IMAGINARY)\n \n'
name = self.data_code['name']
if name == 'freq':
name = 'FREQUENCY'
#else: # mode
#raise RuntimeError(name)
if is_sort1:
line1 = '0 ELEMENT BEND-MOMENT-END-A BEND-MOMENT-END-B SHEAR\n'
line2 = ' ID. PLANE 1 PLANE 2 PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n'
else:
line1 = ' BEND-MOMENT-END-A BEND-MOMENT-END-B SHEAR\n'
line2 = ' %16s PLANE 1 PLANE 2 PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n' % name
# force
msg_temp = header + [
' C O M P L E X F O R C E S I N B A R E L E M E N T S ( C B A R )\n',
mag_phase,
' ',
line1,
line2,
]
if self.is_sort1:
assert self.is_sort1 == True, str(self)
if is_sort1:
page_num = self._write_sort1_as_sort1(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
else:
self._write_sort1_as_sort2(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
else:
assert self.is_sort1 == True, str(self)
return page_num - 1
def _write_sort1_as_sort1(self, f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase):
eids = self.element
#times = self._times
ntimes = self.data.shape[0]
for itime in range(ntimes):
dt = self._times[itime]
dt_line = ' %14s = %12.5E\n' % (self.data_code['name'], dt)
header[1] = dt_line
msg = header + msg_temp
f06_file.write(''.join(msg))
#bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq
assert self.is_sort1 == True, str(self)
bm1a = self.data[itime, :, 0]
bm2a = self.data[itime, :, 1]
bm1b = self.data[itime, :, 2]
bm2b = self.data[itime, :, 3]
ts1 = self.data[itime, :, 4]
ts2 = self.data[itime, :, 5]
af = self.data[itime, :, 6]
trq = self.data[itime, :, 7]
for eid, bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi in zip(eids, bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq):
vals = (bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(bm1air, bm2air, bm1bir, bm2bir, ts1ir, ts2ir, afir, trqir,
bm1aii, bm2aii, bm1bii, bm2bii, ts1ii, ts2ii, afii, trqii) = vals2
f06_file.write('0%16i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %14s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, bm1air, bm2air, bm1bir, bm2bir, ts1ir, ts2ir, afir, trqir,
'', bm1aii, bm2aii, bm1bii, bm2bii, ts1ii, ts2ii, afii, trqii))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def _write_sort1_as_sort2(self, f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase):
eids = self.element
times = self._times
#ntimes = self.data.shape[0]
for ieid, eid in enumerate(eids):
eid_line = ' ELEMENT-ID = %s' % (eid)
header[1] = eid_line
msg = header + msg_temp
f06_file.write(''.join(msg))
#bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq
bm1a = self.data[:, ieid, 0]
bm2a = self.data[:, ieid, 1]
bm1b = self.data[:, ieid, 2]
bm2b = self.data[:, ieid, 3]
ts1 = self.data[:, ieid, 4]
ts2 = self.data[:, ieid, 5]
af = self.data[:, ieid, 6]
trq = self.data[:, ieid, 7]
for dt, bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi in zip(times, bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq):
vals = (bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(bm1air, bm2air, bm1bir, bm2bir, ts1ir, ts2ir, afir, trqir,
bm1aii, bm2aii, bm1bii, bm2bii, ts1ii, ts2ii, afii, trqii) = vals2
f06_file.write('0%16s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %15s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
write_float_12e(dt),
bm1air, bm2air, bm1bir, bm2bir, ts1ir, ts2ir, afir, trqir,
'', bm1aii, bm2aii, bm1bii, bm2bii, ts1ii, ts2ii, afii, trqii))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
eids = self.element
eids_device = eids * 10 + self.device_code
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i 16f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
#bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq
bm1a = self.data[itime, :, 0]
bm2a = self.data[itime, :, 1]
bm1b = self.data[itime, :, 2]
bm2b = self.data[itime, :, 3]
ts1 = self.data[itime, :, 4]
ts2 = self.data[itime, :, 5]
af = self.data[itime, :, 6]
trq = self.data[itime, :, 7]
assert len(eids_device) == len(bm1a.real)
for eid_device, bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi in zip(
eids_device, bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq):
data = [eid_device,
bm1ai.real, bm2ai.real, bm1bi.real, bm2bi.real, ts1i.real, ts2i.real, afi.real, trqi.real,
bm1ai.imag, bm2ai.imag, bm1bi.imag, bm2bi.imag, ts1i.imag, ts2i.imag, afi.imag, trqi.imag]
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCBeamForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
self.result_flag = 0
self.itime = 0
self.nelements = 0 # result specific
#self.element_type = 'CBEAM'
#if is_sort1:
##sort1
#pass
#else:
#raise NotImplementedError('SORT2')
def get_headers(self):
headers = [
'sd', 'bending_moment1', 'bending_moment2', 'shear1', 'shear2',
'axial_force', 'total_torque', 'warping_torque', ]
return headers
def build(self):
"""sizes the vectorized attributes of the ComplexCBeamForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (
#self.ntimes, self.nelements, self.ntotal, self.subtitle))
nnodes = 11
#self.names = []
#self.nelements //= nnodes
self.nelements //= self.ntimes
#self.ntotal //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
self.is_built = True
#print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
self._times = zeros(self.ntimes, 'float32')
self.element = zeros(self.ntotal, 'int32')
self.element_node = zeros((self.ntotal, 2), 'int32')
# the number is messed up because of the offset for the element's properties
if not self.nelements * nnodes == self.ntotal:
msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (
self.ntimes, self.nelements, nnodes, self.nelements * nnodes, self.ntotal)
raise RuntimeError(msg)
#[sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq]
self.data = zeros((self.ntimes, self.ntotal, 8), 'complex64')
def finalize(self):
sd = self.data[0, :, 0].real
i_sd_zero = np.where(sd != 0.0)[0]
i_node_zero = np.where(self.element_node[:, 1] != 0)[0]
assert i_node_zero.max() > 0, 'CBEAM element_node hasnt been filled'
i = np.union1d(i_sd_zero, i_node_zero)
self.element = self.element[i]
self.element_node = self.element_node[i, :]
self.data = self.data[:, i, :]
def build_dataframe(self):
"""creates a pandas dataframe"""
# Freq 0.00001 10.00000 ... 50.00000 60.00000
# ElementID Location Item ...
# 12.0 12.0 bending_moment1 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# bending_moment2 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# shear1 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# shear2 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# axial_force 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# total_torque 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# warping_torque 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# 0.0 1.0 bending_moment1 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# bending_moment2 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# shear1 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# shear2 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# axial_force 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# total_torque 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# warping_torque 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
import pandas as pd
headers = self.get_headers()[1:]
column_names, column_values = self._build_dataframe_transient_header()
element_location = [
self.element_node[:, 0],
self.data[0, :, 0].real,
]
is_v25 = pd.__version__ >= '0.25'
if is_v25:
print(f'skipping pandas {self.class_name}')
return
# wrong type for ElementID
#data_frame = self._build_pandas_transient_element_node(
#column_values, column_names,
#headers, element_location, self.data[:, :, 1:])
#data_frame.index.names = ['ElementID', 'Location', 'Item']
#data_frame.index['ElementID', :]# .astype('int32')
#print(data_frame)
data_frame = pd.Panel(self.data[:, :, 1:], items=column_values,
major_axis=element_location, minor_axis=headers).to_frame()
data_frame.columns.names = column_names
data_frame.index.names = ['ElementID', 'Location', 'Item']
#print(data_frame)
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
return self.assert_equal(table)
def assert_equal(self, table, rtol=1.e-5, atol=1.e-8):
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.allclose(self.data, table.data, atol=atol):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
#print(t1)
#'sd', 'bending_moment1', 'bending_moment2', 'shear1', 'shear2',
#'axial_force', 'total_torque', 'warping_torque', ]
(sd1, bm11, bm21, shear11, shear21, axial1, total_torque1, warp_torque1) = t1
(sd2, bm12, bm22, shear12, shear22, axial2, total_torque2, warp_torque2) = t2
d = t1 - t2
if not allclose(t1, t2, atol=atol):
msg += (
'%-4s (%s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj)\n'
' (%s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj)\n'
' dt12=(%s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj)\n' % (
eid,
bm11.real, bm11.imag,
bm21.real, bm21.imag,
shear11.real, shear11.imag,
shear21.real, shear21.imag,
axial1.real, axial1.imag,
total_torque1.real, total_torque1.imag,
warp_torque1.real, warp_torque1.imag,
bm12.real, bm12.imag,
bm22.real, bm22.imag,
shear12.real, shear12.imag,
shear22.real, shear22.imag,
axial2.real, axial2.imag,
total_torque2.real, total_torque2.imag,
warp_torque2.real, warp_torque2.imag,
d[0].real, d[0].imag,
d[1].real, d[1].imag,
d[2].real, d[2].imag,
d[3].real, d[3].imag,
d[4].real, d[4].imag,
d[5].real, d[5].imag,
d[6].real, d[6].imag,
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
#def add_new_element_sort1(self, dt, eid, nid, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
#return self.add_sort1(dt, eid, nid, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq)
def add_sort1(self, dt, eid, nid, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.data[self.itime, self.itotal, :] = [sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq]
self.element[self.itotal] = eid
self.element_node[self.itotal, :] = [eid, nid]
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
else:
msg.append(' type=%s nelements=%i\n' % (self.__class__.__name__, nelements))
#msg.append(' eType, cid\n')
msg.append(' data: [ntimes, nelements, 8] where 8=[%s]\n' % str(', '.join(self.get_headers())))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' is_sort1=%s is_sort2=%s\n' % (self.is_sort1, self.is_sort2))
msg.append(' CBEAM\n')
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
# option B
#' C O M P L E X F O R C E S I N B E A M E L E M E N T S ( C B E A M ) '
#' (REAL/IMAGINARY)'
#' STAT DIST/ - BENDING MOMENTS - - WEB SHEARS - AXIAL TOTAL WARPING'
#' ELEMENT-ID GRID LENGTH PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE TORQUE'
#'0 20'
#'0 11 0.000 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
#'0 12 1.000 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
#msg_temp, nnodes = get_f06_header(self, is_mag_phase, is_sort1)
#print('write_f06 not implemented for ComplexCBeamForceArray')
#return page_num
#asdf
#is_sort1 = False
if is_mag_phase:
mag_phase = ' (MAGNITUDE/PHASE)\n \n'
else:
mag_phase = ' (REAL/IMAGINARY)\n \n'
if is_sort1:
line1 = '0 ELEMENT BEND-MOMENT-END-A BEND-MOMENT-END-B SHEAR\n'
line2 = ' ID. PLANE 1 PLANE 2 PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n'
else:
name = self.data_code['name']
if name == 'freq':
name = 'FREQUENCY'
else: # mode
raise RuntimeError(name)
line1 = ' BEND-MOMENT-END-A BEND-MOMENT-END-B SHEAR\n'
line2 = ' %16s PLANE 1 PLANE 2 PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n' % name
# force
msg_temp = header + [
' C O M P L E X F O R C E S I N B A R E L E M E N T S ( C B E A M )\n',
mag_phase,
' ',
line1,
line2,
]
if self.is_sort1:
assert self.is_sort1 == True, str(self)
#if is_sort1:
page_num = self._write_sort1_as_sort1(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
#else:
#self._write_sort1_as_sort2(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
else:
assert self.is_sort1 == True, str(self)
return page_num - 1
def _write_sort1_as_sort1(self, f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase):
eids = self.element
#times = self._times
ntimes = self.data.shape[0]
for itime in range(ntimes):
dt = self._times[itime]
dt_line = ' %14s = %12.5E\n' % (self.data_code['name'], dt)
header[1] = dt_line
msg = header + msg_temp
f06_file.write(''.join(msg))
#bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq
assert self.is_sort1 == True, str(self)
#sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq
sd = self.data[itime, :, 0]
bm1 = self.data[itime, :, 1]
bm2 = self.data[itime, :, 2]
ts1 = self.data[itime, :, 3]
ts2 = self.data[itime, :, 4]
af = self.data[itime, :, 5]
ttrq = self.data[itime, :, 6]
wtrq = self.data[itime, :, 7]
for eid, sdi, bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi in zip(eids, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
vals = (sdi, bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(sdir, bm1ir, bm2ir, ts1ir, ts2ir, afir, ttrqir, wtrqir,
sdii, bm1ii, bm2ii, ts1ii, ts2ii, afii, ttrqii, wtrqii) = vals2
f06_file.write('0%16i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %14s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, sdir, bm1ir, bm2ir, ts1ir, ts2ir, afir, ttrqir, wtrqir,
'', sdii, bm1ii, bm2ii, ts1ii, ts2ii, afii, ttrqii, wtrqii))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
#long_form = False
#if nids.min() == 0:
#long_form = True
eids_device = eids * 10 + self.device_code
ueids = np.unique(eids)
#ieid = np.searchsorted(eids, ueids)
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = len(ueids)
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'2i 15f')
struct2 = Struct(endian + b'i 15f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
sd = self.data[itime, :, 0]
bm1 = self.data[itime, :, 1]
bm2 = self.data[itime, :, 2]
ts1 = self.data[itime, :, 3]
ts2 = self.data[itime, :, 4]
af = self.data[itime, :, 5]
ttrq = self.data[itime, :, 6]
wtrq = self.data[itime, :, 7]
icount = 0
nwide = 0
ielement = 0
assert len(eids) == len(sd)
for eid, sdi, bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi in zip(eids, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
if icount == 0:
eid_device = eids_device[ielement]
nid = nids[ielement]
data = [eid_device, nid, sdi.real,
bm1i.real, bm2i.real, ts1i.real, ts2i.real, afi.real, ttrqi.real, wtrqi.real,
bm1i.imag, bm2i.imag, ts1i.imag, ts2i.imag, afi.imag, ttrqi.imag, wtrqi.imag] # 17
op2.write(struct1.pack(*data))
ielement += 1
icount = 1
elif nid > 0 and icount > 0:
# 11 total nodes, with 1, 11 getting an nid; the other 9 being
# xxb sections
data = [0, 0.,
0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0.]
#print('***adding %s\n' % (10-icount))
for unused_i in range(10 - icount):
op2.write(struct2.pack(*data))
nwide += len(data)
eid_device2 = eids_device[ielement]
#print(eids_device)
assert eid_device == eid_device2, 'eid_device=%s eid_device2=%s' % (eid_device, eid_device2)
nid = nids[ielement]
data = [nid, sdi.real,
bm1i.real, bm2i.real, ts1i.real, ts2i.real, afi.real, ttrqi.real, wtrqi.real,
bm1i.imag, bm2i.imag, ts1i.imag, ts2i.imag, afi.imag, ttrqi.imag, wtrqi.imag] # 16
op2.write(struct2.pack(*data))
ielement += 1
icount = 0
else:
raise RuntimeError('CBEAM OEF op2 writer')
#data = [0, xxb, sxc, sxd, sxe, sxf, smax, smin, smt, smc] # 10
#op2.write(struct2.pack(*data))
#icount += 1
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
nwide += len(data)
assert ntotal == nwide, 'ntotal=%s nwide=%s' % (ntotal, nwide)
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCBendForceArray(BaseElement): # 69-CBEND
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
BaseElement.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
headers = [
'bending_moment_1a', 'bending_moment_2a', 'shear_1a', 'shear_2a', 'axial_a', 'torque_a',
'bending_moment_1b', 'bending_moment_2b', 'shear_1b', 'shear_2b', 'axial_b', 'torque_b',
]
return headers
def build(self):
"""sizes the vectorized attributes of the ComplexCBendForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element_node = zeros((self.nelements, 3), dtype='int32')
#[bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a
# bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b]
self.data = zeros((self.ntimes, self.nelements, 12), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Freq 0.0 2.5
#ElementID Item
#6901 bending_moment_1a 1.066567-0.035549j 1.066996-0.035577j
# bending_moment_2a 1.101375-0.036709j 1.102188-0.036763j
# shear_1a 0.516478-0.017214j 0.516842-0.017239j
# shear_2a 0.859292-0.028640j 0.860111-0.028695j
# axial_a 0.834822-0.027825j 0.834982-0.027835j
# torque_a 0.953420-0.031777j 0.953947-0.031813j
# bending_moment_1b -0.284733+0.009490j -0.284828+0.009497j
# bending_moment_2b 0.094127-0.003137j 0.093836-0.003118j
# shear_1b 0.834822-0.027825j 0.834982-0.027835j
# shear_2b 0.859292-0.028640j 0.860111-0.028695j
# axial_b -0.516478+0.017214j -0.516842+0.017239j
# torque_b -0.242082+0.008069j -0.242077+0.008068j
#6902 bending_moment_1a -0.931214+0.031037j -0.931519+0.031058j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
# element_node is (nelements, 3)
element = self.element_node[:, 0]
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, element, self.data)
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.element_node, table.element_node):
assert self.element_node.shape == table.element_node.shape, 'element_node shape=%s table.shape=%s' % (self.element_node.shape, table.element_nodes.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
msg += 'Eid, Nid_A, Nid_B\n'
for (eid1, nida1, nidb1), (eid2, nida2, nidb2) in zip(self.element_node, table.element_node):
msg += '(%s, %s, %s), (%s, %s, %s)\n' % (eid1, nida1, nidb1, eid2, nida2, nidb2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
eids = self.element_node[:, 0]
for itime in range(self.ntimes):
for ie, eid in enumerate(eids):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(bending_moment_1a1, bending_moment_2a1, shear_1a1, shear_2a1, axial_a1, torque_a1,
bending_moment_1b1, bending_moment_2b1, shear_1b1, shear_2b1, axial_b1, torque_b1) = t1
(bending_moment_1a2, bending_moment_2a2, shear_1a2, shear_2a2, axial_a2, torque_a2,
bending_moment_1b2, bending_moment_2b2, shear_1b2, shear_2b2, axial_b2, torque_b2) = t2
if not allclose(t1, t2):
msg += '(%s) (%s, %s) (%s, %s)\n' % (
eid,
bending_moment_1a1.real,
bending_moment_1b1.real,
bending_moment_1a2.real,
bending_moment_1b2.real, )
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#if not allclose(t1, t2):
#msg += '(%s) (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
#eid,
#bending_moment_1a1, bending_moment_2a1, shear_1a1, shear_2a1, axial_a1, torque_a1,
#bending_moment_1b1, bending_moment_2b1, shear_1b1, shear_2b1, axial_b1, torque_b1,
#bending_moment_1a2, bending_moment_2a2, shear_1a2, shear_2a2, axial_a2, torque_a2,
#bending_moment_1b2, bending_moment_2b2, shear_1b2, shear_2b2, axial_b2, torque_b2)
#i += 1
#if i > 10:
#print(msg)
#raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid,
nid_a, bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
nid_b, bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
#bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
#bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b
self._times[self.itime] = dt
self.element_node[self.ielement] = [eid, nid_a, nid_b]
self.data[self.itime, self.ielement, :] = [
bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b
]
self.ielement += 1
if self.ielement == self.nelements:
self.ielement = 0
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
msg = [' C O M P L E X F O R C E S I N B E N D E L E M E N T S ( C B E N D )\n']
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n']
else:
msg += [' (REAL/IMAGINARY)\n']
if is_sort1:
msg += [
' - BENDING MOMENTS - - SHEARS - AXIAL'
' ELEMENT-ID GRID END PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE'
]
else:
raise NotImplementedError('sort2')
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#' C O M P L E X F O R C E S I N B E N D E L E M E N T S ( C B E N D )'
#' (REAL/IMAGINARY)'
#' - BENDING MOMENTS - - SHEARS - AXIAL'
#' ELEMENT-ID GRID END PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE'
#'0 27 21 A 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0'
#'0 22 B 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0'
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element_node[:, 0]
nid_a = self.element_node[:, 1]
nid_b = self.element_node[:, 2]
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
bending_moment_1a = self.data[itime, :, 0]
bending_moment_2a = self.data[itime, :, 1]
shear_1a = self.data[itime, :, 2]
shear_2a = self.data[itime, :, 3]
axial_a = self.data[itime, :, 4]
torque_a = self.data[itime, :, 5]
bending_moment_1b = self.data[itime, :, 6]
bending_moment_2b = self.data[itime, :, 7]
shear_1b = self.data[itime, :, 8]
shear_2b = self.data[itime, :, 9]
axial_b = self.data[itime, :, 10]
torque_b = self.data[itime, :, 11]
for (eid,
nid_ai, bending_moment_1ai, bending_moment_2ai, shear_1ai, shear_2ai, axial_ai, torque_ai,
nid_bi, bending_moment_1bi, bending_moment_2bi, shear_1bi, shear_2bi, axial_bi, torque_bi) in zip(eids,
nid_a, bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
nid_b, bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b):
[bending_moment_1ari, bending_moment_2ari, shear_1ari, shear_2ari, axial_ari, torque_ari,
bending_moment_1bri, bending_moment_2bri, shear_1bri, shear_2bri, axial_bri, torque_bri,
bending_moment_1aii, bending_moment_2aii, shear_1aii, shear_2aii, axial_aii, torque_aii,
bending_moment_1bii, bending_moment_2bii, shear_1bii, shear_2bii, axial_bii, torque_bii,
] = write_imag_floats_13e(
[bending_moment_1ai, bending_moment_2ai, shear_1ai, shear_2ai, axial_ai, torque_ai,
bending_moment_1bi, bending_moment_2bi, shear_1bi, shear_2bi, axial_bi, torque_bi],
is_mag_phase)
f06_file.write(
'0 %8s%8s A %13s %13s %13s %13s %13s %s\n'
' %13s %13s %13s %13s %13s %s\n'
'0 %8s%8s B %13s %13s %13s %13s %13s %s\n'
' %13s %13s %13s %13s %13s %s\n'
% (
eid, nid_ai,
bending_moment_1ari, bending_moment_2ari, shear_1ari, shear_2ari, axial_ari, torque_ari,
bending_moment_1aii, bending_moment_2aii, shear_1aii, shear_2aii, axial_aii, torque_aii,
'', nid_bi,
bending_moment_1bri, bending_moment_2bri, shear_1bri, shear_2bri, axial_bri, torque_bri,
bending_moment_1bii, bending_moment_2bii, shear_1bii, shear_2bii, axial_bii, torque_bii,))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class ComplexSolidPressureForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
if is_sort1:
pass
else:
raise NotImplementedError('SORT2')
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
headers = ['ax', 'ay', 'az', 'vx', 'vy', 'vz', 'pressure']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexSolidPressureForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[ax, ay, az, vx, vy, vz, pressure]
self.data = zeros((self.ntimes, self.ntotal, 7), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Mode 1 2
#EigenvalueReal -0.000000 -0.000000
#EigenvalueImag -0.000000 -0.000000
#Damping 0.000000 0.000000
#ElementID Item
#1000 ax -1.887379e-13+2.791559e-13j -1.901257e-13+2.789015e-13j
# ay 3.330669e-14-7.316397e-14j 1.776357e-14-7.368508e-14j
# az -1.360023e-13-9.545406e-14j -1.432188e-13-8.333307e-14j
# vx 0.000000e+00+0.000000e+00j 0.000000e+00+0.000000e+00j
# vy 0.000000e+00+0.000000e+00j 0.000000e+00+0.000000e+00j
# vz 0.000000e+00+0.000000e+00j 0.000000e+00+0.000000e+00j
# pressure 0.000000e+00+0.000000e+00j 0.000000e+00+0.000000e+00j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(ax1, ay1, az1, vx1, vy1, vz1, pressure1) = t1
(ax2, ay2, az2, vx2, vy2, vz2, pressure2) = t2
#rpressure1 = pressure1.real
#rpressure2 = pressure2.real
if not allclose([ax1, ay1, az1, vx1, vy1, vz1],
[ax2, ay2, az2, vx2, vy2, vz2]):
msg += '%s (%s, %s) (%s, %s)\n' % (
eid,
ax1.real, t1.imag,
ax2.real, t2.imag)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, ename, ax, ay, az, vx, vy, vz, pressure):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [ax, ay, az, vx, vy, vz, pressure]
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
msg = [
' ( R O O T M E A N S Q U A R E ) \n'
' C O M P L E X A C C E L E R A T I O N S V E L O C I T I E S A N D P R E S S U R E L E V E L S\n'
#' (REAL/IMAGINARY)'
#' ELE-ID EL-TYPE X-ACCELERATION Y-ACCELERATION Z-ACCELERATION X-VELOCITY Y-VELOCITY Z-VELOCITY PRESSURE (DB)'
#' 2000 PENPR 6.883253E+06 1.066544E+07 -6.883253E+06 7.288279E+05 -3.134843E+04 -7.288279E+05 1.162309E+02'
#' 1.831744E+07 -7.878719E+05 -1.831744E+07 -2.738759E+05 -4.243642E+05 2.738759E+05'
#''
]
#msg = [' C O M P L E X A C O U S T I C P R E S S U R E R E S U L T S']
#' C O M P L E X A C O U S T I C P R E S S U R E R E S U L T S'
#' (MAGNITUDE/PHASE)'
#' '
#' POINT ID. TYPE P P(RMS) DB DB(A)'
#'0 57 S 7.339671E+05 5.189931E+05 1.173135E+02 3.011353E+01'
#' 249.9102 249.9102 249.9102 249.9102'
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n \n']
else:
msg += [' (REAL/IMAGINARY)\n \n']
if is_sort1:
msg += [' ELE-ID EL-TYPE X-ACCELERATION Y-ACCELERATION Z-ACCELERATION X-VELOCITY Y-VELOCITY Z-VELOCITY PRESSURE (DB)\n']
#msg += [
#' POINT ID. TYPE P P(RMS) DB DB(A)\n'
#]
#' 14 0.0 / 0.0 0.0 / 0.0'
else:
raise NotImplementedError('sort2')
return msg
#def get_element_index(self, eids):
## elements are always sorted; nodes are not
#itot = searchsorted(eids, self.element) #[0]
#return itot
#def eid_to_element_node_index(self, eids):
##ind = ravel([searchsorted(self.element == eid) for eid in eids])
#ind = searchsorted(eids, self.element)
##ind = ind.reshape(ind.size)
##ind.sort()
#return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
etypei = self.element_type
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
ax = self.data[itime, :, 0]
ay = self.data[itime, :, 0]
az = self.data[itime, :, 0]
vx = self.data[itime, :, 0]
vy = self.data[itime, :, 0]
vz = self.data[itime, :, 0]
pressure = self.data[itime, :, 0]
for eid, axi, ayi, azi, vxi, vyi, vzi, pressurei in zip(eids, ax, ay, az, vx, vy, vz, pressure):
out = write_imag_floats_13e([axi, ayi, azi, vxi, vyi, vzi, pressurei], is_mag_phase)
[saxr, sayr, sazr, svxr, svyr, svzr, spressurer,
saxi, sayi, sazi, svxi, svyi, svzi, spressurei] = out
#' 1000 HEXPR 1.582050E-08 5.505425E+06 2.598164E-09 -8.884337E-10 -4.806934E+04 1.046571E-10 9.968034E+01'
#' -1.116439E-08 -6.040572E+05 1.315160E-09 -1.258955E-09 -4.381078E+05 -2.067553E-10'
f06_file.write(' %8i %8s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %8s %-13s %-13s %-13s %-13s %-13s %s\n\n'
% (eid, etypei, saxr, sayr, sazr, svxr, svyr, svzr, spressurer,
'', '', saxi, sayi, sazi, svxi, svyi, svzi))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result, date,
is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
if self.is_sort1:
struct1 = Struct(endian + b'i 8s13f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
etypei = self.element_type
if etypei == 76:
ename = b'HEXPR'
elif etypei == 77:
ename = b'PENPR'
elif etypei == 78:
ename = b'TETPR'
else:
raise NotImplementedError(self)
#etypeb = self.element_type#.encode('ascii')
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
ax = self.data[itime, :, 0]
ay = self.data[itime, :, 0]
az = self.data[itime, :, 0]
vx = self.data[itime, :, 0]
vy = self.data[itime, :, 0]
vz = self.data[itime, :, 0]
pressure = self.data[itime, :, 0]
for eid, eid_device, axi, ayi, azi, vxi, vyi, vzi, pressurei in zip(
eids, eids_device, ax, ay, az, vx, vy, vz, pressure):
out = write_imag_floats_13e([axi, ayi, azi, vxi, vyi, vzi, pressurei], is_mag_phase)
[saxr, sayr, sazr, svxr, svyr, svzr, spressurer,
saxi, sayi, sazi, svxi, svyi, svzi, spressurei] = out
#' 1000 HEXPR 1.582050E-08 5.505425E+06 2.598164E-09 -8.884337E-10 -4.806934E+04 1.046571E-10 9.968034E+01'
#' -1.116439E-08 -6.040572E+05 1.315160E-09 -1.258955E-09 -4.381078E+05 -2.067553E-10'
data = [
eid_device, ename,
axi.real, ayi.real, azi.real, vxi.real, vyi.real, vzi.real, pressurei.real,
axi.imag, ayi.imag, azi.imag, vxi.imag, vyi.imag, vzi.imag,
]
op2_ascii.write(' %8i %8s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %8s %-13s %-13s %-13s %-13s %-13s %s\n\n'
% (eid, etypei, saxr, sayr, sazr, svxr, svyr, svzr, spressurer,
'', '', saxi, sayi, sazi, svxi, svyi, svzi))
op2.write(struct1.pack(*data))
#for eid, eid_device, fxi, fyi, fzi, mxi, myi, mzi in zip(eids, eids_device, fx, fy, fz, mx, my, mz):
#data = [
#eid_device,
#fxi.real, fyi.real, fzi.real, mxi.real, myi.real, mzi.real,
#fxi.imag, fyi.imag, fzi.imag, mxi.imag, myi.imag, mzi.imag,
#]
#vals = (fxi, fyi, fzi, mxi, myi, mzi)
#vals2 = write_imag_floats_13e(vals, is_mag_phase)
#(fxir, fyir, fzir, mxir, myir, mzir,
#fxii, fyii, fzii, mxii, myii, mzii) = vals2
#op2_ascii.write('0%26i %-13s %-13s %-13s %-13s %-13s %s\n'
#' %26s %-13s %-13s %-13s %-13s %-13s %s\n' % (
#eid, fxir, fyir, fzir, mxir, myir, mzir,
#'', fxii, fyii, fzii, mxii, myii, mzii))
#op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCBushForceArray(ComplexForceObject):
def get_headers(self):
headers = ['fx', 'fy', 'fz', 'mx', 'my', 'mz']
return headers
def __init__(self, data_code, is_sort1, isubcase, dt):
ComplexForceObject.__init__(self, data_code, isubcase)
self.result_flag = 0
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.itime = 0
self.nelements = 0 # result specific
self.element_type = 'CBUSH'
@property
def is_real(self):
return False
@property
def is_complex(self):
return True
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def build(self):
"""sizes the vectorized attributes of the ComplexCBushForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (
#self.ntimes, self.nelements, self.ntotal, self.subtitle))
if self.is_built:
return
nnodes = 1
#self.names = []
#self.nelements //= nnodes
self.nelements /= self.ntimes
#self.ntotal //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
self.is_built = True
#print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
self._times = zeros(self.ntimes, 'float32')
self.element = zeros(self.ntotal, 'int32')
# the number is messed up because of the offset for the element's properties
if self.nelements * nnodes != self.ntotal:
msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (
self.ntimes, self.nelements, nnodes, self.nelements * nnodes, self.ntotal)
raise RuntimeError(msg)
#[fx, fy, fz, mx, my, mz]
self.data = zeros((self.ntimes, self.ntotal, 6), 'complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Freq 10.0
#ElementID Item
#123 fx 10000.000000+0.000021j
# fy 1000.000000+0.000002j
# fz 100.000000+0.000000j
# mx 7000.000000+0.000000j
# my 700.000000+0.000000j
# mz 70.000000+0.000000j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
(tx1, ty1, tz1, rx1, ry1, rz1) = t1
(tx2, ty2, tz2, rx2, ry2, rz2) = t2
d = t1 - t2
if not allclose([tx1.real, tx1.imag, ty1.real, ty1.imag],
[tx2.real, tx2.imag, ty2.real, ty2.imag], atol=0.0001):
#if not np.array_equal(t1, t2):
msg += '%-4s (%s, %sj, %s, %sj)\n (%s, %sj, %s, %sj)\n dt12=(%s, %sj, %s, %sj)\n' % (
eid,
tx1.real, tx1.imag, ty1.real, ty1.imag,
tx2.real, tx2.imag, ty2.real, ty2.imag,
d[0].real, d[0].imag, d[1].real, d[1].imag,)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, fx, fy, fz, mx, my, mz):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
#[fx, fy, fz, mx, my, mz]
self._times[self.itime] = dt
self.data[self.itime, self.itotal, :] = [fx, fy, fz, mx, my, mz]
self.element[self.itotal] = eid
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
else:
msg.append(' type=%s nelements=%i; table_name=%r\n' % (
self.__class__.__name__, nelements, self.table_name))
msg.append(' eType, cid\n')
msg.append(' data: [ntimes, nelements, 6] where 6=[%s]\n' % str(', '.join(self.get_headers())))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
# msg.append(' is_sort1=%s is_sort2=%s\n' % (self.is_sort1, self.is_sort2))
msg.append(' CBUSH\n')
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#msg_temp, nnodes = get_f06_header(self, is_mag_phase, is_sort1)
# write the f06
#is_sort1 = False
if is_mag_phase:
mag_phase = ' (MAGNITUDE/PHASE)\n\n'
else:
mag_phase = ' (REAL/IMAGINARY)\n\n'
name = self.data_code['name']
if name == 'freq':
name = 'FREQUENCY'
else:
raise RuntimeError(name)
# is_sort1 = True
if is_sort1:
line2 = ' ID. FORCE-X FORCE-Y FORCE-Z MOMENT-X MOMENT-Y MOMENT-Z \n'
else:
line2 = ' %26s FORCE-X FORCE-Y FORCE-Z MOMENT-X MOMENT-Y MOMENT-Z \n' % name
# force
msg_temp = header + [
' C O M P L E X F O R C E S I N B U S H E L E M E N T S ( C B U S H ) \n',
mag_phase,
' ',
# line1,
line2,
]
if self.is_sort1:
if is_sort1:
page_num = self._write_sort1_as_sort1(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
else:
page_num = self._write_sort1_as_sort2(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
else:
assert self.is_sort1 == True, str(self)
return page_num - 1
def _write_sort1_as_sort1(self, f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase):
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime]
dt_line = ' %14s = %12.5E\n' % (self.data_code['name'], dt)
header[1] = dt_line
msg = header + msg_temp
f06_file.write(''.join(msg))
#fx, fy, fz, mx, my, mz
if self.is_sort1:
fx = self.data[itime, :, 0]
fy = self.data[itime, :, 1]
fz = self.data[itime, :, 2]
mx = self.data[itime, :, 3]
my = self.data[itime, :, 4]
mz = self.data[itime, :, 5]
else:
fx = self.data[:, itime, 0]
fy = self.data[:, itime, 1]
fz = self.data[:, itime, 2]
mx = self.data[:, itime, 3]
my = self.data[:, itime, 4]
mz = self.data[:, itime, 5]
for eid, fxi, fyi, fzi, mxi, myi, mzi in zip(eids, fx, fy, fz, mx, my, mz):
vals = (fxi, fyi, fzi, mxi, myi, mzi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(fxir, fyir, fzir, mxir, myir, mzir,
fxii, fyii, fzii, mxii, myii, mzii) = vals2
f06_file.write('0%26i %-13s %-13s %-13s %-13s %-13s %s\n'
' %26s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, fxir, fyir, fzir, mxir, myir, mzir,
'', fxii, fyii, fzii, mxii, myii, mzii))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def _write_sort1_as_sort2(self, f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase):
eids = self.element
times = self._times
for ieid, eid in enumerate(eids):
eid_line = ' ELEMENT-ID = %s' % (eid)
header[1] = eid_line
msg = header + msg_temp
f06_file.write(''.join(msg))
if self.is_sort1:
fx = self.data[:, ieid, 0]
fy = self.data[:, ieid, 1]
fz = self.data[:, ieid, 2]
mx = self.data[:, ieid, 3]
my = self.data[:, ieid, 4]
mz = self.data[:, ieid, 5]
else:
raise RuntimeError()
for dt, fxi, fyi, fzi, mxi, myi, mzi in zip(times, fx, fy, fz, mx, my, mz):
vals = (fxi, fyi, fzi, mxi, myi, mzi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(fxir, fyir, fzir, mxir, myir, mzir,
fxii, fyii, fzii, mxii, myii, mzii) = vals2
f06_file.write('0%26s %-13s %-13s %-13s %-13s %-13s %s\n'
' %26s %-13s %-13s %-13s %-13s %-13s %s\n' % (
write_float_12e(dt),
fxir, fyir, fzir, mxir, myir, mzir,
'', fxii, fyii, fzii, mxii, myii, mzii))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def write_op2(self, op2, op2_ascii, itable, new_result, date,
is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
if self.is_sort1:
struct1 = Struct(endian + b'i 12f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
fx = self.data[itime, :, 0]
fy = self.data[itime, :, 1]
fz = self.data[itime, :, 2]
mx = self.data[itime, :, 3]
my = self.data[itime, :, 4]
mz = self.data[itime, :, 5]
for eid, eid_device, fxi, fyi, fzi, mxi, myi, mzi in zip(eids, eids_device, fx, fy, fz, mx, my, mz):
data = [
eid_device,
fxi.real, fyi.real, fzi.real, mxi.real, myi.real, mzi.real,
fxi.imag, fyi.imag, fzi.imag, mxi.imag, myi.imag, mzi.imag,
]
vals = (fxi, fyi, fzi, mxi, myi, mzi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(fxir, fyir, fzir, mxir, myir, mzir,
fxii, fyii, fzii, mxii, myii, mzii) = vals2
op2_ascii.write('0%26i %-13s %-13s %-13s %-13s %-13s %s\n'
' %26s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, fxir, fyir, fzir, mxir, myir, mzir,
'', fxii, fyii, fzii, mxii, myii, mzii))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCBeamForceVUArray(BaseElement): # 191-VUBEAM
"""
**ELTYPE = 191 Beam view element (VUBEAM)**
2 PARENT I Parent p-element identification number
3 COORD I CID coordinate system identification number
4 ICORD CHAR4 ICORD flat/curved and so on TCODE,7 =0 Real
5 VUGRID I VU grid ID for output grid
6 POSIT RS x/L position of VU grid identification number
7 POS(3) RS Y, Z, W coordinate of output point
10 NX RS Normal x
11 TXY RS Shear xy
12 TZX RS Shear zx
**ELTYPE = 191 Beam view element (VUBEAM)**
TCODE,7 = 1 Real/imaginary or magnitude/phase
5 VUGRID I VU grid identification number for output grid
6 POSIT RS x/L position of VU grid identification number
7 FORCEXR RS Force x real/mag.
8 SHEARYR RS Shear force y real/mag.
9 SHEARZR RS Shear force z real/mag.
10 TORSINR RS Torsional moment x real/mag.
11 BENDYR RS Bending moment y real/mag.
12 BENDZR RS Bending moment z real/mag.
13 FORCEXI RS Force x imag./phase
14 SHEARYI RS Shear force y imag./phase
15 SHEARZI RS Shear force z imag./phase
16 TORSINI RS Torsional moment x imag./phase
17 BENDYI RS Bending moment y imag./phase
18 BENDZI RS Bending moment z imag./phase
Words 5 through max repeat 2 times
"""
def __init__(self, data_code, is_sort1, isubcase, dt):
BaseElement.__init__(self, data_code, isubcase, apply_data_code=True)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.ielement = 0
self.nelements = 0 # result specific
self.nnodes = None
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
@property
def is_real(self):
return False
@property
def is_complex(self):
return True
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
return ['xxb', 'force_x', 'shear_y', 'shear_z', 'torsion', 'bending_y', 'bending_z']
def build(self):
"""sizes the vectorized attributes of the ComplexCBendForceVUArray"""
#print("self.ielement = %s" % self.ielement)
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
if self.element_type in [191]: # VUBEAM
nnodes_per_element = 2
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
#print('nnodes_per_element[%s, %s] = %s' % (self.isubcase, self.element_type, nnodes_per_element))
self.nnodes = nnodes_per_element
#self.nelements //= nnodes_per_element
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s" % (
#self.element_name, self.element_type, nnodes_per_element, self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = np.zeros(self.ntimes, dtype=dtype)
self.element_node = np.zeros((self.ntotal, 2), dtype='int32')
self.parent_coord = np.zeros((self.ntotal, 2), dtype='int32')
#[xxb, force_x, shear_y, shear_z, torsion, bending_y, bending_z]
self.data = np.zeros((self.ntimes, self.ntotal, 7), dtype='complex64')
#def build_dataframe(self):
#"""creates a pandas dataframe"""
#import pandas as pd
#headers = self.get_headers()
#nelements = self.element_node.shape[0] // 2
#if self.is_fiber_distance:
#fiber_distance = ['Top', 'Bottom'] * nelements
#else:
#fiber_distance = ['Mean', 'Curvature'] * nelements
#fd = np.array(fiber_distance, dtype='unicode')
#element_node = [self.element_node[:, 0], self.element_node[:, 1], fd]
#if self.nonlinear_factor not in (None, np.nan):
#column_names, column_values = self._build_dataframe_transient_header()
#self.data_frame = pd.Panel(self.data, items=column_values, major_axis=element_node, minor_axis=headers).to_frame()
#self.data_frame.columns.names = column_names
#self.data_frame.index.names = ['ElementID', 'NodeID', 'Location', 'Item']
#else:
## option B - nice!
#df1 = pd.DataFrame(element_node).T
#df1.columns = ['ElementID', 'NodeID', 'Location']
#df2 = pd.DataFrame(self.data[0])
#df2.columns = headers
#self.data_frame = df1.join(df2)
#self.data_frame = self.data_frame.reset_index().replace({'NodeID': {0:'CEN'}}).set_index(['ElementID', 'NodeID', 'Location'])
#print(self.data_frame)
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, element_nodei in enumerate(self.element_node):
(eid, nid) = element_nodei
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(xxb1, fx1, fy1, fz1, mx1, my1, mz1) = t1
(xxb2, fx2, fy2, fz2, mx2, my2, mz2) = t2
if not np.array_equal(t1, t2):
eid_nid1 = '(%s, %s) ' % (eid, nid)
eid_nid2 = ' ' * len(eid_nid1)
msg += ('%s(%s, %s, %s, %s, %s, %s, %s)\n%s(%s, %s, %s, %s, %s, %s, %s)\n' % (
eid_nid1,
xxb1, fx1, fy1, fz1, mx1, my1, mz1,
eid_nid2,
xxb2, fx2, fy2, fz2, mx2, my2, mz2))
i += 1
if i > 10:
#print(msg.replace('+0j,', '0,'))
raise ValueError(msg.replace('0j,', '0,').replace('+0j)', ')'))
#print(msg)
if i > 0:
raise ValueError(msg.replace('0j,', '0,').replace('+0j)', ')'))
return True
def _add_sort1(self, dt, eid, parent, coord, icord,
node_id, xxb, force_x, shear_y, shear_z, torsion, bending_y, bending_z):
assert eid is not None, eid
assert isinstance(node_id, int), node_id
self.element_node[self.itotal, :] = [eid, node_id]
self.parent_coord[self.itotal, :] = [parent, coord]
# TODO: save ICORD
#print('parent=%r, coord=%r, icord=%r' % (parent, coord, icord))
self.data[self.itime, self.itotal, :] = [xxb, force_x, shear_y, shear_z, torsion, bending_y, bending_z]
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
nnodes = self.nnodes
ntotal = self.ntotal
nlayers = 2
nelements = self.ntotal // self.nnodes // 2
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msgi = ' type=%s ntimes=%i nelements=%i nnodes_per_element=%i nlayers=%i ntotal=%i\n' % (
self.__class__.__name__, ntimes, nelements, nnodes, nlayers, ntotal)
ntimes_word = 'ntimes'
else:
msgi = ' type=%s nelements=%i nnodes_per_element=%i nlayers=%i ntotal=%i\n' % (
self.__class__.__name__, nelements, nnodes, nlayers, ntotal)
ntimes_word = '1'
msg.append(msgi)
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, ntotal, %i] where %i=[%s]\n' % (ntimes_word, n, n,
str(', '.join(headers))))
msg.append(' element_node.shape = %s\n' % str(self.element_node.shape).replace('L', ''))
msg.append(' data.shape=%s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = np.searchsorted(eids, self.element_node[:, 0]) #[0]
return itot
def eid_to_element_node_index(self, eids):
ind = np.ravel([np.searchsorted(self.element_node[:, 0] == eid) for eid in eids])
#ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
"""
C O M P L E X F O R C E S I N P - V E R S I O N B E A M E L E M E N T S ( B E A M )
(REAL/IMAGINARY)
VU-ELEMENT ID= 100001001, P-ELEMENT ID = 1, OUTPUT COORD. ID= 0, P OF EDGES = 3
VUGRID VUGRID DIST/ - BENDING MOMENTS - -WEB SHEARS - AXIAL TOTAL
ID. LENGTH PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE
111001001 0.000 0.000000E+00 -1.598690E+05 0.000000E+00 -1.040952E+06 0.000000E+00 0.000000E+00
0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
111001002 0.333 0.000000E+00 5.328967E+04 0.000000E+00 1.872484E+05 0.000000E+00 0.000000E+00
0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
C O M P L E X S T R A I N S I N P - V E R S I O N B E A M E L E M E N T S ( B E A M )
(REAL/IMAGINARY)
VU-ELEMENT ID= 100001003, P-ELEMENT ID = 1, OUTPUT COORD. ID= 0, P OF EDGES = 3
VUGRID VUGRID DIST/ LOCATION LOCATION LOCATION LOCATION
ID. LENGTH C D E F
111001003 0.667 -2.557904E+00 -2.557904E+00 2.557904E+00 2.557904E+00
0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
111001004 1.000 7.673713E+00 7.673713E+00 -7.673713E+00 -7.673713E+00
0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
"""
msg = [
' C O M P L E X F O R C E S I N P - V E R S I O N B E A M E L E M E N T S ( B E A M )\n'
' (REAL/IMAGINARY)\n'
' VU-ELEMENT ID= %9i, P-ELEMENT ID =%8i, OUTPUT COORD. ID=%8i, P OF EDGES = 3\n'
'\n'
' VUGRID VUGRID DIST/ - BENDING MOMENTS - -WEB SHEARS - AXIAL TOTAL \n'
' ID. LENGTH PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE \n'
#' 111001003 0.667 0.000000E+00 5.328967E+04 0.000000E+00 -1.872484E+05 0.000000E+00 0.000000E+00'
#' 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00'
#' 111001004 1.000 0.000000E+00 -1.598690E+05 0.000000E+00 1.040952E+06 0.000000E+00 0.000000E+00'
#' 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00'
#' C O M P L E X S T R A I N S I N P - V E R S I O N B E A M E L E M E N T S ( B E A M )\n'
#' (REAL/IMAGINARY)\n'
#' VU-ELEMENT ID= %9i, P-ELEMENT ID = 1, OUTPUT COORD. ID= 0, P OF EDGES = 3\n'
#'\n'
#' VUGRID VUGRID DIST/ LOCATION LOCATION LOCATION LOCATION \n'
#' ID. LENGTH C D E F \n'
#' 111001003 0.667 -2.557904E+00 -2.557904E+00 2.557904E+00 2.557904E+00'
#' 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00'
#' 111001004 1.000 7.673713E+00 7.673713E+00 -7.673713E+00 -7.673713E+00'
#' 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00'
]
if header is None:
header = []
#msg, nnodes, cen = _get_plate_msg(self)
# write the f06
ntimes = self.data.shape[0]
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
parent = self.parent_coord[:, 0]
coord = self.parent_coord[:, 1]
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
#[xxb, force_x, shear_y, shear_z, torsion, bending_y, bending_z]
xxb = self.data[itime, :, 0]
fx = self.data[itime, :, 1]
fy = self.data[itime, :, 2]
fz = self.data[itime, :, 3]
mx = self.data[itime, :, 4]
my = self.data[itime, :, 5]
mz = self.data[itime, :, 6]
for (i, eid, parenti, coordi, nid, xxbi, fxi, fyi, fzi, mxi, myi, mzi) in zip(
cycle(range(2)), eids, parent, coord, nids, xxb, fx, fy, fz, mx, my, mz):
if i == 0:
f06_file.write(''.join(header + msg) % (eid, parenti, coordi))
#out = write_imag_floats_13e([fxi, fyi, fzi, mxi, myi, mzi], is_mag_phase=is_mag_phase)
#[fxri, fyri, fzri, mxri, myri, mzri,
#fxii, fyii, fzii, mxii, myii, mzii] = out
# nid xxb
f06_file.write(
' %9i %.3f %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n'
' %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n' % (
nid, xxbi.real,
myi.real, mzi.real, fyi.real, fzi.real, fxi.real, mxi.real,
myi.imag, mzi.imag, fyi.imag, fzi.imag, fxi.imag, mxi.imag,
))
# stress/strain
#f06_file.write(
#' %9i %.3s %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n'
#' %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n' % (
#nid, xxbi.real,
#fxi.real, fyi.real, fzi.real, mxi.real, myi.real, mzi.real,
#fxi.imag, fyi.imag, fzi.imag, mxi.imag, myi.imag, mzi.imag,
#))
if i == 1:
f06_file.write(page_stamp % page_num + '\n')
page_num += 1
return page_num - 1
class ComplexForceVU_2DArray(BaseElement): # 189-VUQUAD,190-VUTRIA
def __init__(self, data_code, is_sort1, isubcase, dt):
BaseElement.__init__(self, data_code, isubcase)
#self.parent = {}
#self.coord = {}
#self.icord = {}
#self.theta = {}
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
self.ntimes = 0
# TODO if dt=None, handle SORT1 case
self.dt = dt
#if is_sort1:
#if dt is not None:
#self.add = self.add_sort1
#else:
#assert dt is not None
#self.add = self.add_sort2
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def build(self):
"""sizes the vectorized attributes of the ComplexCShearForceArray"""
#print('%s ntimes=%s nelements=%s ntotal=%s' % (
#self.element_type, self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.ntotal = self.nelements
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element_node = zeros((self.nelements, 2), dtype='int32')
#[membrane_x, membrane_y, membrane_xy, bending_x, bending_y, bending_xy,
# shear_yz, shear_xz]
self.data = zeros((self.ntimes, self.nelements, 8), dtype='complex64')
def get_headers(self):
headers = [
'membrane_x', 'membrane_y', 'membrane_xy',
'bending_x', 'bending_y', 'bending_xy',
'shear_yz', 'shear_xz']
return headers
def add_sort1(self, nnodes, dt, eid, parent, coord, icord, theta, vugrids, forces):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
#self.parent[eid] = parent
#self.coord[eid] = coord
#self.icord[eid] = icord
#self.theta[eid] = theta
for vugrid, force in zip(vugrids, forces):
self.element_node[self.ielement, :] = [eid, vugrid]
self.data[self.itime, self.ielement, :] = force
self.ielement += 1
#' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 8 )'
#' (REAL/IMAGINARY)'
#' '
#' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -'
#' ID GRID-ID FX FY FXY MX MY MXY QX QY'
#'0 100 CEN/8 0.0 0.0 0.0 0.0 0.0 0.0 -3.492460E-10 -1.368206E-09'
#' 0.0 0.0 0.0 0.0 0.0 0.0 2.910383E-11 5.088840E-10'
#''
| 46.341074
| 199
| 0.489854
| 22,798
| 185,596
| 3.847618
| 0.050882
| 0.033288
| 0.008037
| 0.009576
| 0.886751
| 0.877528
| 0.863073
| 0.846691
| 0.831654
| 0.819011
| 0
| 0.081393
| 0.392234
| 185,596
| 4,004
| 200
| 46.352647
| 0.696256
| 0.239429
| 0
| 0.792863
| 0
| 0.022498
| 0.13259
| 0.000451
| 0
| 0
| 0
| 0.000749
| 0.02948
| 1
| 0.05896
| false
| 0.001164
| 0.010085
| 0.003879
| 0.117145
| 0.006206
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a78f9ebf300bb448c7355856b3a3a95dcc73579e
| 26,507
|
py
|
Python
|
django_evolution/tests/test_preprocessing.py
|
kamrankalantarli/django-evolution
|
3e67426b189aecca5e470607838d1191f4892859
|
[
"BSD-3-Clause"
] | null | null | null |
django_evolution/tests/test_preprocessing.py
|
kamrankalantarli/django-evolution
|
3e67426b189aecca5e470607838d1191f4892859
|
[
"BSD-3-Clause"
] | null | null | null |
django_evolution/tests/test_preprocessing.py
|
kamrankalantarli/django-evolution
|
3e67426b189aecca5e470607838d1191f4892859
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from django_evolution.mutations import (AddField, ChangeField, DeleteField,
DeleteModel, RenameField, RenameModel,
SQLMutation)
from django_evolution.tests.base_test_case import EvolutionTestCase
class PreprocBaseModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
class ReffedPreprocModel(models.Model):
value = models.IntegerField()
class PreprocessingTests(EvolutionTestCase):
"""Testing pre-processing of mutations."""
sql_mapping_key = 'preprocessing'
default_base_model = PreprocBaseModel
def test_add_delete_field(self):
"""Testing pre-processing AddField + DeleteField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='', max_length=20),
DeleteField('TestModel', 'added_field'),
],
'',
[],
'noop',
expect_noop=True)
def test_add_delete_add_field(self):
"""Testing pre-processing AddField + DeleteField + AddField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
added_field = models.IntegerField()
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='', max_length=20),
DeleteField('TestModel', 'added_field'),
AddField('TestModel', 'added_field', models.IntegerField,
initial=42)
],
("In model tests.TestModel:\n"
" Field 'added_field' has been added"),
[
"AddField('TestModel', 'added_field', models.IntegerField,"
" initial=<<USER VALUE REQUIRED>>)",
],
'add_delete_add_field')
def test_add_delete_add_rename_field(self):
"""Testing pre-processing AddField + DeleteField + AddField +
RenameField
"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
renamed_field = models.IntegerField()
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='', max_length=20),
DeleteField('TestModel', 'added_field'),
AddField('TestModel', 'added_field', models.IntegerField,
initial=42),
RenameField('TestModel', 'added_field', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added"),
[
"AddField('TestModel', 'renamed_field', models.IntegerField,"
" initial=<<USER VALUE REQUIRED>>)",
],
'add_delete_add_rename_field')
def test_add_change_field(self):
"""Testing pre-processing AddField + ChangeField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
added_field = models.CharField(max_length=50, null=True)
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='foo', max_length=20),
ChangeField('TestModel', 'added_field', null=True,
initial='bar', max_length=50),
],
("In model tests.TestModel:\n"
" Field 'added_field' has been added"),
[
"AddField('TestModel', 'added_field', models.CharField,"
" max_length=50, null=True)",
],
'add_change_field')
def test_add_change_change_field(self):
"""Testing pre-processing AddField + ChangeField + ChangeField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
added_field = models.CharField(max_length=50, null=True)
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='foo', max_length=20),
ChangeField('TestModel', 'added_field', null=True,
initial='bar', max_length=30),
ChangeField('TestModel', 'added_field',
initial='bar', max_length=50),
],
("In model tests.TestModel:\n"
" Field 'added_field' has been added"),
[
"AddField('TestModel', 'added_field', models.CharField,"
" max_length=50, null=True)",
],
'add_change_field')
def test_add_change_delete_field(self):
"""Testing pre-processing AddField + ChangeField + DeleteField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='foo', max_length=20),
ChangeField('TestModel', 'added_field', null=True),
DeleteField('TestModel', 'added_field'),
],
'',
[],
'noop',
expect_noop=True)
def test_add_change_rename_field(self):
"""Testing pre-processing AddField + ChangeField + RenameField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
renamed_field = models.CharField(max_length=50, null=True)
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='foo', max_length=20),
ChangeField('TestModel', 'added_field', null=True,
initial='bar', max_length=50),
RenameField('TestModel', 'added_field', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added"),
[
"AddField('TestModel', 'renamed_field', models.CharField,"
" max_length=50, null=True)",
],
'add_change_rename_field')
def test_add_rename_change_field(self):
"""Testing pre-processing AddField + RenameField + ChangeField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
renamed_field = models.CharField(max_length=50, null=True)
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='foo', max_length=20),
RenameField('TestModel', 'added_field', 'renamed_field'),
ChangeField('TestModel', 'renamed_field', null=True,
initial='bar', max_length=50),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added"),
[
"AddField('TestModel', 'renamed_field', models.CharField,"
" max_length=50, null=True)",
],
'add_rename_change_field')
def test_add_rename_change_rename_change_field(self):
"""Testing pre-processing AddField + RenameField + ChangeField +
RenameField + ChangeField
"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
renamed_field = models.CharField(max_length=50, null=True)
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='foo', max_length=20),
RenameField('TestModel', 'added_field', 'foo_field'),
ChangeField('TestModel', 'foo_field', null=True),
RenameField('TestModel', 'foo_field', 'renamed_field'),
ChangeField('TestModel', 'renamed_field', max_length=50),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added"),
[
"AddField('TestModel', 'renamed_field', models.CharField,"
" max_length=50, null=True)",
],
'add_rename_change_rename_change_field')
def test_add_rename_delete(self):
"""Testing pre-processing AddField + RenameField + DeleteField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='foo', max_length=20),
RenameField('TestModel', 'added_field', 'renamed_field'),
DeleteField('TestModel', 'renamed_field'),
],
'',
[],
'noop',
expect_noop=True)
def test_add_rename_field_with_db_column(self):
"""Testing pre-processing AddField + RenameField with
RenameField.db_column
"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
renamed_field = models.CharField(max_length=50, null=True,
db_column='added_field')
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
max_length=50, null=True),
RenameField('TestModel', 'added_field', 'renamed_field',
db_column='added_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added"),
[
"AddField('TestModel', 'renamed_field', models.CharField,"
" db_column='added_field', max_length=50, null=True)",
],
'add_rename_field_with_db_column')
def test_add_field_rename_model(self):
"""Testing pre-processing AddField + RenameModel"""
class RenamedReffedPreprocModel(models.Model):
value = models.IntegerField()
class Meta:
db_table = 'tests_reffedpreprocmodel'
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
added_field = models.ForeignKey(RenamedReffedPreprocModel,
null=True,
on_delete=models.CASCADE)
self.set_base_model(
self.default_base_model,
extra_models=[
('ReffedPreprocModel', ReffedPreprocModel)
])
# Prepare the renamed model in the end signature.
end, end_sig = self.make_end_signatures(DestModel, 'TestModel')
end_app_sig = end_sig.get_app_sig('tests')
end_model_sig = end_app_sig.get_model_sig('ReffedPreprocModel').clone()
end_model_sig.model_name = 'RenamedReffedPreprocModel'
end_app_sig.remove_model_sig('ReffedPreprocModel')
end_app_sig.add_model_sig(end_model_sig)
end_field_sig = (
end_app_sig
.get_model_sig('TestModel')
.get_field_sig('added_field')
)
end_field_sig.related_model = 'tests.RenamedReffedPreprocModel'
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.ForeignKey,
null=True, related_model='tests.ReffedPreprocModel'),
RenameModel('ReffedPreprocModel', 'RenamedReffedPreprocModel',
db_table='tests_reffedpreprocmodel'),
],
("The model tests.ReffedPreprocModel has been deleted\n"
"In model tests.TestModel:\n"
" Field 'added_field' has been added"),
[
"AddField('TestModel', 'added_field', models.ForeignKey,"
" null=True, related_model='tests.RenamedReffedPreprocModel')",
"DeleteModel('ReffedPreprocModel')",
],
'add_field_rename_model',
end=end,
end_sig=end_sig)
def test_add_rename_field_rename_model(self):
"""Testing pre-processing AddField + RenameField + RenameModel"""
class RenamedReffedPreprocModel(models.Model):
value = models.IntegerField()
class Meta:
db_table = 'tests_reffedpreprocmodel'
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
renamed_field = models.ForeignKey(RenamedReffedPreprocModel,
null=True,
on_delete=models.CASCADE)
self.set_base_model(
self.default_base_model,
extra_models=[
('ReffedPreprocModel', ReffedPreprocModel)
])
# Prepare the renamed model in the end signature.
end, end_sig = self.make_end_signatures(DestModel, 'TestModel')
end_app_sig = end_sig.get_app_sig('tests')
end_model_sig = end_app_sig.get_model_sig('ReffedPreprocModel').clone()
end_model_sig.model_name = 'RenamedReffedPreprocModel'
end_app_sig.remove_model_sig('ReffedPreprocModel')
end_app_sig.add_model_sig(end_model_sig)
end_field_sig = (
end_app_sig
.get_model_sig('TestModel')
.get_field_sig('renamed_field')
)
end_field_sig.related_model = 'tests.RenamedReffedPreprocModel'
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.ForeignKey,
null=True,
related_model='tests.ReffedPreprocModel'),
RenameField('TestModel', 'added_field', 'renamed_field'),
RenameModel('ReffedPreprocModel', 'RenamedReffedPreprocModel',
db_table='tests_reffedpreprocmodel'),
],
("The model tests.ReffedPreprocModel has been deleted\n"
"In model tests.TestModel:\n"
" Field 'renamed_field' has been added"),
[
"AddField('TestModel', 'renamed_field', models.ForeignKey,"
" null=True, related_model='tests.RenamedReffedPreprocModel')",
"DeleteModel('ReffedPreprocModel')",
],
'add_rename_field_rename_model',
end=end,
end_sig=end_sig)
def test_add_sql_delete(self):
"""Testing pre-processing AddField + SQLMutation + DeleteField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
self.perform_evolution_tests(
DestModel,
[
AddField('TestModel', 'added_field', models.CharField,
initial='foo', max_length=20),
SQLMutation('dummy-sql',
['-- Comment --'],
lambda app_label, proj_sig: None),
DeleteField('TestModel', 'added_field'),
],
'',
[
"DeleteField('TestModel', 'char_field')",
],
'add_sql_delete',
expect_noop=True)
def test_change_delete_field(self):
"""Testing pre-processing ChangeField + DeleteField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
self.perform_evolution_tests(
DestModel,
[
ChangeField('TestModel', 'char_field', null=True),
DeleteField('TestModel', 'char_field'),
],
("In model tests.TestModel:\n"
" Field 'char_field' has been deleted"),
[
"DeleteField('TestModel', 'char_field')",
],
'delete_char_field')
def test_change_rename_field(self):
"""Testing pre-processing ChangeField + RenameField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
renamed_field = models.CharField(max_length=20, null=True)
self.perform_evolution_tests(
DestModel,
[
ChangeField('TestModel', 'char_field', null=True),
RenameField('TestModel', 'char_field', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'char_field' has been deleted"),
[
"AddField('TestModel', 'renamed_field', models.CharField,"
" max_length=20, null=True)",
"DeleteField('TestModel', 'char_field')",
],
'change_rename_field')
def test_change_rename_change_rename_field(self):
"""Testing pre-processing ChangeField + RenameField + ChangeField +
RenameField
"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
renamed_field = models.CharField(max_length=30, null=True)
self.perform_evolution_tests(
DestModel,
[
ChangeField('TestModel', 'char_field', max_length=30),
RenameField('TestModel', 'char_field', 'foo_field'),
ChangeField('TestModel', 'foo_field', null=True),
RenameField('TestModel', 'foo_field', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'char_field' has been deleted"),
[
"AddField('TestModel', 'renamed_field', models.CharField,"
" max_length=30, null=True)",
"DeleteField('TestModel', 'char_field')",
],
'change_rename_change_rename_field')
def test_change_rename_delete_field(self):
"""Testing pre-processing ChangeField + RenameField + DeleteField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
self.perform_evolution_tests(
DestModel,
[
ChangeField('TestModel', 'char_field', null=True),
RenameField('TestModel', 'char_field', 'renamed_field'),
DeleteField('TestModel', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'char_field' has been deleted"),
[
"DeleteField('TestModel', 'char_field')",
],
'delete_char_field')
def test_rename_add_field(self):
"""Testing pre-processing RenameField + AddField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
renamed_field = models.CharField(max_length=20)
char_field = models.CharField(max_length=50, null=True)
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'char_field', 'renamed_field'),
AddField('TestModel', 'char_field', models.CharField,
max_length=50, null=True),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" In field 'char_field':\n"
" Property 'max_length' has changed\n"
" Property 'null' has changed"),
[
"AddField('TestModel', 'renamed_field', models.CharField,"
" initial=<<USER VALUE REQUIRED>>, max_length=20)",
"ChangeField('TestModel', 'char_field', initial=None,"
" max_length=50, null=True)",
],
'rename_add_field')
def test_rename_delete_field(self):
"""Testing pre-processing RenameField + DeleteField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'char_field', 'renamed_field'),
DeleteField('TestModel', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'char_field' has been deleted"),
[
"DeleteField('TestModel', 'char_field')",
],
'delete_char_field')
def test_rename_change_delete_field(self):
"""Testing pre-processing RenameField + ChangeField + DeleteField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'char_field', 'renamed_field'),
ChangeField('TestModel', 'renamed_field', null=True),
DeleteField('TestModel', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'char_field' has been deleted"),
[
"DeleteField('TestModel', 'char_field')",
],
'delete_char_field')
def test_rename_change_rename_change_field(self):
"""Testing pre-processing RenameField + ChangeField + RenameField +
ChangeField
"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
renamed_field = models.CharField(max_length=50, null=True)
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'char_field', 'foo_field'),
ChangeField('TestModel', 'foo_field', max_length=30,
null=True),
RenameField('TestModel', 'foo_field', 'renamed_field'),
ChangeField('TestModel', 'renamed_field', max_length=50),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'char_field' has been deleted"),
[
"AddField('TestModel', 'renamed_field', models.CharField,"
" max_length=50, null=True)",
"DeleteField('TestModel', 'char_field')",
],
'rename_change_rename_change_field')
def test_rename_rename_field(self):
"""Testing pre-processing RenameField + RenameField"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
renamed_field = models.CharField(max_length=20)
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'char_field', 'foo_field'),
RenameField('TestModel', 'foo_field', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'char_field' has been deleted"),
[
"AddField('TestModel', 'renamed_field', models.CharField,"
" initial=<<USER VALUE REQUIRED>>, max_length=20)",
"DeleteField('TestModel', 'char_field')",
],
'rename_rename_field')
def test_rename_rename_model(self):
"""Testing pre-processing RenameModel + RenameModel"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
class Meta:
db_table = 'tests_testmodel'
self.perform_evolution_tests(
DestModel,
[
RenameModel('TestModel', 'TempModel',
db_table='tests_testmodel'),
RenameModel('TempModel', 'DestModel',
db_table='tests_testmodel'),
],
"The model tests.TestModel has been deleted",
[
"DeleteModel('TestModel')",
],
'noop',
model_name='DestModel')
def test_rename_delete_model(self):
"""Testing pre-processing RenameModel + DeleteModel"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
class Meta:
db_table = 'tests_testmodel'
self.perform_evolution_tests(
DestModel,
[
RenameModel('TestModel', 'TempModel',
db_table='tests_testmodel'),
DeleteModel('TempModel'),
],
"The model tests.TestModel has been deleted",
[
"DeleteModel('TestModel')",
],
'rename_delete_model',
model_name='DestModel')
| 39.503726
| 79
| 0.551967
| 2,402
| 26,507
| 5.828893
| 0.044963
| 0.051068
| 0.075709
| 0.064067
| 0.930576
| 0.91672
| 0.878223
| 0.841511
| 0.812085
| 0.783158
| 0
| 0.007459
| 0.342476
| 26,507
| 670
| 80
| 39.562687
| 0.795857
| 0.060361
| 0
| 0.739602
| 0
| 0
| 0.257189
| 0.056744
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045208
| false
| 0
| 0.007233
| 0
| 0.122966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac0f523686ef2719f03ef73735413b04f172a002
| 68
|
py
|
Python
|
GitMarco/graphics/__init__.py
|
GitMarco27/GitMarco
|
2d9dd93a73a6d7b68d63222512a646cdd988909e
|
[
"MIT"
] | null | null | null |
GitMarco/graphics/__init__.py
|
GitMarco27/GitMarco
|
2d9dd93a73a6d7b68d63222512a646cdd988909e
|
[
"MIT"
] | null | null | null |
GitMarco/graphics/__init__.py
|
GitMarco27/GitMarco
|
2d9dd93a73a6d7b68d63222512a646cdd988909e
|
[
"MIT"
] | null | null | null |
import GitMarco.graphics.plotly
import GitMarco.graphics.matplotlib
| 22.666667
| 35
| 0.882353
| 8
| 68
| 7.5
| 0.625
| 0.466667
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 68
| 2
| 36
| 34
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ac3642eebb503f23b120e4d69a31403f3e4a186f
| 40,757
|
py
|
Python
|
tests/data/service_github.py
|
David-Le-Nir/sphinxcontrib-needs
|
fe809445505fa1e9bf5963eab1d6283dad405e92
|
[
"MIT"
] | 90
|
2016-11-30T21:23:10.000Z
|
2022-01-11T16:33:56.000Z
|
tests/data/service_github.py
|
David-Le-Nir/sphinxcontrib-needs
|
fe809445505fa1e9bf5963eab1d6283dad405e92
|
[
"MIT"
] | 359
|
2016-12-02T14:53:44.000Z
|
2022-03-31T11:59:03.000Z
|
tests/data/service_github.py
|
David-Le-Nir/sphinxcontrib-needs
|
fe809445505fa1e9bf5963eab1d6283dad405e92
|
[
"MIT"
] | 25
|
2018-06-20T18:56:13.000Z
|
2022-03-25T06:11:40.000Z
|
# :noqa
# Debug data set, which is used for all requests against github api by official docs.
# Needed to avoid external service calls during tests
# Response for a search api call
GITHUB_ISSUE_SEARCH_ANSWER = {
"total_count": 2,
"incomplete_results": False,
"items": [
{
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/141",
"repository_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs",
"labels_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/141/labels{/name}",
"comments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/141/comments",
"events_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/141/events",
"html_url": "https://github.com/useblocks/sphinxcontrib-needs/issues/141",
"id": 586783574,
"node_id": "MDU6SXNzdWU1ODY3ODM1NzQ=",
"number": 100,
"title": "A node can only be in one page, else it will be cut when generate latexpdf ",
"user": {
"login": "sophiali2008",
"id": 62423175,
"node_id": "MDQ6VXNlcjYyNDIzMTc1",
"avatar_url": "https://avatars.githubusercontent.com/u/62423175?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/sophiali2008",
"html_url": "https://github.com/sophiali2008",
"followers_url": "https://api.github.com/users/sophiali2008/followers",
"following_url": "https://api.github.com/users/sophiali2008/following{/other_user}",
"gists_url": "https://api.github.com/users/sophiali2008/gists{/gist_id}",
"starred_url": "https://api.github.com/users/sophiali2008/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sophiali2008/subscriptions",
"organizations_url": "https://api.github.com/users/sophiali2008/orgs",
"repos_url": "https://api.github.com/users/sophiali2008/repos",
"events_url": "https://api.github.com/users/sophiali2008/events{/privacy}",
"received_events_url": "https://api.github.com/users/sophiali2008/received_events",
"type": "User",
"site_admin": False,
},
"labels": [
{
"id": 491973814,
"node_id": "MDU6TGFiZWw0OTE5NzM4MTQ=",
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/labels/bug",
"name": "bug",
"color": "ee0701",
"default": True,
"description": None,
}
],
"state": "open",
"locked": False,
"assignee": None,
"assignees": [],
"milestone": {
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/milestones/6",
"html_url": "https://github.com/useblocks/sphinxcontrib-needs/milestone/6",
"labels_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/milestones/6/labels",
"id": 5182610,
"node_id": "MDk6TWlsZXN0b25lNTE4MjYxMA==",
"number": 6,
"title": "0.5.5",
"description": None,
"creator": {
"login": "danwos",
"id": 998700,
"node_id": "MDQ6VXNlcjk5ODcwMA==",
"avatar_url": "https://avatars.githubusercontent.com/u/998700?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/danwos",
"html_url": "https://github.com/danwos",
"followers_url": "https://api.github.com/users/danwos/followers",
"following_url": "https://api.github.com/users/danwos/following{/other_user}",
"gists_url": "https://api.github.com/users/danwos/gists{/gist_id}",
"starred_url": "https://api.github.com/users/danwos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/danwos/subscriptions",
"organizations_url": "https://api.github.com/users/danwos/orgs",
"repos_url": "https://api.github.com/users/danwos/repos",
"events_url": "https://api.github.com/users/danwos/events{/privacy}",
"received_events_url": "https://api.github.com/users/danwos/received_events",
"type": "User",
"site_admin": False,
},
"open_issues": 2,
"closed_issues": 0,
"state": "open",
"created_at": "2020-03-09T13:53:46Z",
"updated_at": "2020-03-24T08:50:03Z",
"due_on": None,
"closed_at": None,
},
"comments": 3,
"created_at": "2020-03-24T08:30:43Z",
"updated_at": "2021-02-12T11:57:06Z",
"closed_at": None,
"author_association": "NONE",
"active_lock_reason": None,
"body": "Hello,\r\nI used the extension needs 0.5.3 in my project, it is perfectly fit in html. \r\nBut when I "
"generate latexpdf, the content is cut if it is more than one page. Is it possible to generate it like "
"a longtable which adjust the content in different continues pages ?",
"performed_via_github_app": None,
"score": 1.0,
}
],
}
# Response of a specific element from github api
GITHUB_SPECIFIC_ISSUE_ANSWER = {
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/141",
"repository_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs",
"labels_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/" "141/labels{/name}",
"comments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/141/comments",
"events_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/141/events",
"html_url": "https://github.com/useblocks/sphinxcontrib-needs/issues/141",
"id": 586783574,
"node_id": "MDU6SXNzdWU1ODY3ODM1NzQ=",
"number": 340,
"title": "A node can only be in one page, else it will be cut when generate latexpdf ",
"user": {
"login": "sophiali2008",
"id": 62423175,
"node_id": "MDQ6VXNlcjYyNDIzMTc1",
"avatar_url": "https://avatars.githubusercontent.com/u/62423175?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/sophiali2008",
"html_url": "https://github.com/sophiali2008",
"followers_url": "https://api.github.com/users/sophiali2008/followers",
"following_url": "https://api.github.com/users/sophiali2008/following{/other_user}",
"gists_url": "https://api.github.com/users/sophiali2008/gists{/gist_id}",
"starred_url": "https://api.github.com/users/sophiali2008/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sophiali2008/subscriptions",
"organizations_url": "https://api.github.com/users/sophiali2008/orgs",
"repos_url": "https://api.github.com/users/sophiali2008/repos",
"events_url": "https://api.github.com/users/sophiali2008/events{/privacy}",
"received_events_url": "https://api.github.com/users/sophiali2008/received_events",
"type": "User",
"site_admin": False,
},
"labels": [
{
"id": 491973814,
"node_id": "MDU6TGFiZWw0OTE5NzM4MTQ=",
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs" "/labels/bug",
"name": "bug",
"color": "ee0701",
"default": True,
"description": None,
}
],
"state": "open",
"locked": False,
"assignee": None,
"assignees": [],
"milestone": {
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/milestones/6",
"html_url": "https://github.com/useblocks/sphinxcontrib-needs/milestone/6",
"labels_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/milestones/6/labels",
"id": 5182610,
"node_id": "MDk6TWlsZXN0b25lNTE4MjYxMA==",
"number": 6,
"title": "0.5.5",
"description": None,
"creator": {
"login": "danwos",
"id": 998700,
"node_id": "MDQ6VXNlcjk5ODcwMA==",
"avatar_url": "https://avatars.githubusercontent.com/u/998700?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/danwos",
"html_url": "https://github.com/danwos",
"followers_url": "https://api.github.com/users/danwos/followers",
"following_url": "https://api.github.com/users/danwos/following{/other_user}",
"gists_url": "https://api.github.com/users/danwos/gists{/gist_id}",
"starred_url": "https://api.github.com/users/danwos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/danwos/subscriptions",
"organizations_url": "https://api.github.com/users/danwos/orgs",
"repos_url": "https://api.github.com/users/danwos/repos",
"events_url": "https://api.github.com/users/danwos/events{/privacy}",
"received_events_url": "https://api.github.com/users/danwos/received_events",
"type": "User",
"site_admin": False,
},
"open_issues": 2,
"closed_issues": 0,
"state": "open",
"created_at": "2020-03-09T13:53:46Z",
"updated_at": "2020-03-24T08:50:03Z",
"due_on": None,
"closed_at": None,
},
"comments": 3,
"created_at": "2020-03-24T08:30:43Z",
"updated_at": "2021-02-12T11:57:06Z",
"closed_at": None,
"author_association": "NONE",
"active_lock_reason": None,
"body": "Hello,\r\nI used the extension needs 0.5.3 in my project, it is perfectly fit in html. \r\nBut when I "
"generate latexpdf, the content is cut if it is more than one page. Is it possible to generate it like a "
"longtable which adjust the content in different continues pages ?",
"performed_via_github_app": None,
"score": 1.0,
}
GITHUB_SPECIFIC_COMMIT_ANSWER = {
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits"
"/dc14807f0b247e758bbcbef289 "
"53444f18b503e3",
"sha": "dc14807f0b247e758bbcbef28953444f18b503e3",
"node_id": "MDY6Q29tbWl0NzUyMjU5NDk6ZGMxNDgwN2YwYjI0N2U3NThiYmNiZWYyODk1MzQ0NGYxOGI1MDNlMw==",
"html_url": "https://github.com/useblocks/sphinxcontrib-needs/commit/dc14807f0b247e758bbcbef28953444f18b503e3",
"comments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits"
"/dc14807f0b247e758bbcbef28953444f18b503e3/comments",
"commit": {
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/commits"
"/dc14807f0b247e758bbcbef28953444f18b503e3",
"author": {
"date": "2021-01-06T06:33:48.000+01:00",
"name": "Daniel Woste",
"email": "daniel.woste@useblocks.com",
},
"committer": {
"date": "2021-01-06T06:47:45.000+01:00",
"name": "Daniel Woste",
"email": "daniel.woste@useblocks.com",
},
"message": "string format fix to support python 3.5",
"tree": {
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/trees"
"/5bf05fc659dd7f2481e5e90ffca5015ee25e047a",
"sha": "5bf05fc659dd7f2481e5e90ffca5015ee25e047a",
},
"comment_count": 0,
},
"author": {
"login": "danwos",
"id": 998700,
"node_id": "MDQ6VXNlcjk5ODcwMA==",
"avatar_url": "https://avatars.githubusercontent.com/u/998700?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/danwos",
"html_url": "https://github.com/danwos",
"followers_url": "https://api.github.com/users/danwos/followers",
"following_url": "https://api.github.com/users/danwos/following{/other_user}",
"gists_url": "https://api.github.com/users/danwos/gists{/gist_id}",
"starred_url": "https://api.github.com/users/danwos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/danwos/subscriptions",
"organizations_url": "https://api.github.com/users/danwos/orgs",
"repos_url": "https://api.github.com/users/danwos/repos",
"events_url": "https://api.github.com/users/danwos/events{/privacy}",
"received_events_url": "https://api.github.com/users/danwos/received_events",
"type": "User",
"site_admin": False,
},
"committer": {
"login": "danwos",
"id": 998700,
"node_id": "MDQ6VXNlcjk5ODcwMA==",
"avatar_url": "https://avatars.githubusercontent.com/u/998700?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/danwos",
"html_url": "https://github.com/danwos",
"followers_url": "https://api.github.com/users/danwos/followers",
"following_url": "https://api.github.com/users/danwos/following{/other_user}",
"gists_url": "https://api.github.com/users/danwos/gists{/gist_id}",
"starred_url": "https://api.github.com/users/danwos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/danwos/subscriptions",
"organizations_url": "https://api.github.com/users/danwos/orgs",
"repos_url": "https://api.github.com/users/danwos/repos",
"events_url": "https://api.github.com/users/danwos/events{/privacy}",
"received_events_url": "https://api.github.com/users/danwos/received_events",
"type": "User",
"site_admin": False,
},
"parents": [
{
"url": "https://api.github.com/repos/useblocks"
"/sphinxcontrib-needs/commits"
"/beec7bec42035e3cad09c8400397fa7c828cdd91",
"html_url": "https://github.com/useblocks/sphinxcontrib"
"-needs/commit"
"/beec7bec42035e3cad09c8400397fa7c828cdd91",
"sha": "beec7bec42035e3cad09c8400397fa7c828cdd91",
}
],
"repository": {
"id": 75225949,
"node_id": "MDEwOlJlcG9zaXRvcnk3NTIyNTk0OQ==",
"name": "sphinxcontrib-needs",
"full_name": "useblocks/sphinxcontrib-needs",
"private": False,
"owner": {
"login": "useblocks",
"id": 998587,
"node_id": "MDEyOk9yZ2FuaXphdGlvbjk5ODU4Nw==",
"avatar_url": "https://avatars.githubusercontent.com/u/998587?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/useblocks",
"html_url": "https://github.com/useblocks",
"followers_url": "https://api.github.com/users/useblocks/followers",
"following_url": "https://api.github.com/users/useblocks/following{/other_user}",
"gists_url": "https://api.github.com/users/useblocks/gists{/gist_id}",
"starred_url": "https://api.github.com/users/useblocks/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/useblocks/subscriptions",
"organizations_url": "https://api.github.com/users/useblocks/orgs",
"repos_url": "https://api.github.com/users/useblocks/repos",
"events_url": "https://api.github.com/users/useblocks/events{/privacy}",
"received_events_url": "https://api.github.com/users/useblocks/received_events",
"type": "Organization",
"site_admin": False,
},
"html_url": "https://github.com/useblocks/sphinxcontrib-needs",
"description": "Adds needs/requirements to sphinx",
"fork": False,
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs",
"forks_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/forks",
"keys_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs"
"/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/teams",
"hooks_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/hooks",
"issue_events_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/events{" "/number}",
"events_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/events",
"assignees_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/assignees{/user}",
"branches_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/branches{/branch}",
"tags_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/tags",
"blobs_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/statuses/{sha}",
"languages_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/languages",
"stargazers_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/stargazers",
"contributors_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/contributors",
"subscribers_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/subscribers",
"subscription_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/subscription",
"commits_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git" "/commits{/sha}",
"comments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues" "/comments{/number}",
"contents_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/contents/{+path}",
"compare_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/compare/{base}...{" "head}",
"merges_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/merges",
"archive_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/{archive_format}{" "/ref}",
"downloads_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/downloads",
"issues_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues{/number}",
"pulls_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/pulls{/number}",
"milestones_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/milestones{" "/number}",
"notifications_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs"
"/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/labels{/name}",
"releases_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/releases{/id}",
"deployments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/deployments",
},
"score": 1.0,
}
GITHUB_SEARCH_COMMIT_ANSWER = {
"total_count": 11,
"incomplete_results": False,
"items": [
{
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits/dc14807f0b247e758bbcbef28953444f18b503e3",
"sha": "dc14807f0b247e758bbcbef28953444f18b503e3",
"node_id": "MDY6Q29tbWl0NzUyMjU5NDk6ZGMxNDgwN2YwYjI0N2U3NThiYmNiZWYyODk1MzQ0NGYxOGI1MDNlMw==",
"html_url": "https://github.com/useblocks/sphinxcontrib-needs/commit/dc14807f0b247e758bbcbef28953444f18b503e3",
"comments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits/dc14807f0b247e758bbcbef28953444f18b503e3/comments",
"commit": {
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/commits/dc14807f0b247e758bbcbef28953444f18b503e3",
"author": {
"date": "2021-01-06T06:33:48.000+01:00",
"name": "Daniel Woste",
"email": "daniel.woste@useblocks.com",
},
"committer": {
"date": "2021-01-06T06:47:45.000+01:00",
"name": "Daniel Woste",
"email": "daniel.woste@useblocks.com",
},
"message": "string format fix to support python 3.5",
"tree": {
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/trees/5bf05fc659dd7f2481e5e90ffca5015ee25e047a",
"sha": "5bf05fc659dd7f2481e5e90ffca5015ee25e047a",
},
"comment_count": 0,
},
"author": {
"login": "danwos",
"id": 998700,
"node_id": "MDQ6VXNlcjk5ODcwMA==",
"avatar_url": "https://avatars.githubusercontent.com/u/998700?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/danwos",
"html_url": "https://github.com/danwos",
"followers_url": "https://api.github.com/users/danwos/followers",
"following_url": "https://api.github.com/users/danwos/following{/other_user}",
"gists_url": "https://api.github.com/users/danwos/gists{/gist_id}",
"starred_url": "https://api.github.com/users/danwos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/danwos/subscriptions",
"organizations_url": "https://api.github.com/users/danwos/orgs",
"repos_url": "https://api.github.com/users/danwos/repos",
"events_url": "https://api.github.com/users/danwos/events{/privacy}",
"received_events_url": "https://api.github.com/users/danwos/received_events",
"type": "User",
"site_admin": False,
},
"committer": {
"login": "danwos",
"id": 998700,
"node_id": "MDQ6VXNlcjk5ODcwMA==",
"avatar_url": "https://avatars.githubusercontent.com/u/998700?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/danwos",
"html_url": "https://github.com/danwos",
"followers_url": "https://api.github.com/users/danwos/followers",
"following_url": "https://api.github.com/users/danwos/following{/other_user}",
"gists_url": "https://api.github.com/users/danwos/gists{/gist_id}",
"starred_url": "https://api.github.com/users/danwos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/danwos/subscriptions",
"organizations_url": "https://api.github.com/users/danwos/orgs",
"repos_url": "https://api.github.com/users/danwos/repos",
"events_url": "https://api.github.com/users/danwos/events{/privacy}",
"received_events_url": "https://api.github.com/users/danwos/received_events",
"type": "User",
"site_admin": False,
},
"parents": [
{
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits/beec7bec42035e3cad09c8400397fa7c828cdd91",
"html_url": "https://github.com/useblocks/sphinxcontrib-needs/commit/beec7bec42035e3cad09c8400397fa7c828cdd91",
"sha": "beec7bec42035e3cad09c8400397fa7c828cdd91",
}
],
"repository": {
"id": 75225949,
"node_id": "MDEwOlJlcG9zaXRvcnk3NTIyNTk0OQ==",
"name": "sphinxcontrib-needs",
"full_name": "useblocks/sphinxcontrib-needs",
"private": False,
"owner": {
"login": "useblocks",
"id": 998587,
"node_id": "MDEyOk9yZ2FuaXphdGlvbjk5ODU4Nw==",
"avatar_url": "https://avatars.githubusercontent.com/u/998587?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/useblocks",
"html_url": "https://github.com/useblocks",
"followers_url": "https://api.github.com/users/useblocks/followers",
"following_url": "https://api.github.com/users/useblocks/following{/other_user}",
"gists_url": "https://api.github.com/users/useblocks/gists{/gist_id}",
"starred_url": "https://api.github.com/users/useblocks/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/useblocks/subscriptions",
"organizations_url": "https://api.github.com/users/useblocks/orgs",
"repos_url": "https://api.github.com/users/useblocks/repos",
"events_url": "https://api.github.com/users/useblocks/events{/privacy}",
"received_events_url": "https://api.github.com/users/useblocks/received_events",
"type": "Organization",
"site_admin": False,
},
"html_url": "https://github.com/useblocks/sphinxcontrib-needs",
"description": "Adds needs/requirements to sphinx",
"fork": False,
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs",
"forks_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/forks",
"keys_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/teams",
"hooks_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/hooks",
"issue_events_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/events{/number}",
"events_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/events",
"assignees_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/assignees{/user}",
"branches_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/branches{/branch}",
"tags_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/tags",
"blobs_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/statuses/{sha}",
"languages_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/languages",
"stargazers_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/stargazers",
"contributors_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/contributors",
"subscribers_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/subscribers",
"subscription_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/subscription",
"commits_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/contents/{+path}",
"compare_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/merges",
"archive_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/downloads",
"issues_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues{/number}",
"pulls_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/pulls{/number}",
"milestones_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/milestones{/number}",
"notifications_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/labels{/name}",
"releases_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/releases{/id}",
"deployments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/deployments",
},
"score": 1.0,
},
{
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits/e9e2da4fece4a053d1a033027f08356657d61d63",
"sha": "e9e2da4fece4a053d1a033027f08356657d61d63",
"node_id": "MDY6Q29tbWl0NzUyMjU5NDk6ZTllMmRhNGZlY2U0YTA1M2QxYTAzMzAyN2YwODM1NjY1N2Q2MWQ2Mw==",
"html_url": "https://github.com/useblocks/sphinxcontrib-needs/commit/e9e2da4fece4a053d1a033027f08356657d61d63",
"comments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits/e9e2da4fece4a053d1a033027f08356657d61d63/comments",
"commit": {
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/commits/e9e2da4fece4a053d1a033027f08356657d61d63",
"author": {
"date": "2020-04-24T08:34:27.000+02:00",
"name": "Daniel Woste",
"email": "daniel.woste@useblocks.com",
},
"committer": {
"date": "2020-04-24T08:36:31.000+02:00",
"name": "Daniel Woste",
"email": "daniel.woste@useblocks.com",
},
"message": "Pin MarkupSafe installation to version <2 for Python<3.6",
"tree": {
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/trees/cb297c837987a8facf4c5208a24d66162d249dee",
"sha": "cb297c837987a8facf4c5208a24d66162d249dee",
},
"comment_count": 0,
},
"author": {
"login": "danwos",
"id": 998700,
"node_id": "MDQ6VXNlcjk5ODcwMA==",
"avatar_url": "https://avatars.githubusercontent.com/u/998700?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/danwos",
"html_url": "https://github.com/danwos",
"followers_url": "https://api.github.com/users/danwos/followers",
"following_url": "https://api.github.com/users/danwos/following{/other_user}",
"gists_url": "https://api.github.com/users/danwos/gists{/gist_id}",
"starred_url": "https://api.github.com/users/danwos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/danwos/subscriptions",
"organizations_url": "https://api.github.com/users/danwos/orgs",
"repos_url": "https://api.github.com/users/danwos/repos",
"events_url": "https://api.github.com/users/danwos/events{/privacy}",
"received_events_url": "https://api.github.com/users/danwos/received_events",
"type": "User",
"site_admin": False,
},
"committer": {
"login": "danwos",
"id": 998700,
"node_id": "MDQ6VXNlcjk5ODcwMA==",
"avatar_url": "https://avatars.githubusercontent.com/u/998700?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/danwos",
"html_url": "https://github.com/danwos",
"followers_url": "https://api.github.com/users/danwos/followers",
"following_url": "https://api.github.com/users/danwos/following{/other_user}",
"gists_url": "https://api.github.com/users/danwos/gists{/gist_id}",
"starred_url": "https://api.github.com/users/danwos/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/danwos/subscriptions",
"organizations_url": "https://api.github.com/users/danwos/orgs",
"repos_url": "https://api.github.com/users/danwos/repos",
"events_url": "https://api.github.com/users/danwos/events{/privacy}",
"received_events_url": "https://api.github.com/users/danwos/received_events",
"type": "User",
"site_admin": False,
},
"parents": [
{
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits/add7f346e42921f779d2181b95ae2c7aea3d6546",
"html_url": "https://github.com/useblocks/sphinxcontrib-needs/commit/add7f346e42921f779d2181b95ae2c7aea3d6546",
"sha": "add7f346e42921f779d2181b95ae2c7aea3d6546",
}
],
"repository": {
"id": 75225949,
"node_id": "MDEwOlJlcG9zaXRvcnk3NTIyNTk0OQ==",
"name": "sphinxcontrib-needs",
"full_name": "useblocks/sphinxcontrib-needs",
"private": False,
"owner": {
"login": "useblocks",
"id": 998587,
"node_id": "MDEyOk9yZ2FuaXphdGlvbjk5ODU4Nw==",
"avatar_url": "https://avatars.githubusercontent.com/u/998587?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/useblocks",
"html_url": "https://github.com/useblocks",
"followers_url": "https://api.github.com/users/useblocks/followers",
"following_url": "https://api.github.com/users/useblocks/following{/other_user}",
"gists_url": "https://api.github.com/users/useblocks/gists{/gist_id}",
"starred_url": "https://api.github.com/users/useblocks/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/useblocks/subscriptions",
"organizations_url": "https://api.github.com/users/useblocks/orgs",
"repos_url": "https://api.github.com/users/useblocks/repos",
"events_url": "https://api.github.com/users/useblocks/events{/privacy}",
"received_events_url": "https://api.github.com/users/useblocks/received_events",
"type": "Organization",
"site_admin": False,
},
"html_url": "https://github.com/useblocks/sphinxcontrib-needs",
"description": "Adds needs/requirements to sphinx",
"fork": False,
"url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs",
"forks_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/forks",
"keys_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/teams",
"hooks_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/hooks",
"issue_events_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/events{/number}",
"events_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/events",
"assignees_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/assignees{/user}",
"branches_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/branches{/branch}",
"tags_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/tags",
"blobs_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/statuses/{sha}",
"languages_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/languages",
"stargazers_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/stargazers",
"contributors_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/contributors",
"subscribers_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/subscribers",
"subscription_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/subscription",
"commits_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/contents/{+path}",
"compare_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/merges",
"archive_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/downloads",
"issues_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/issues{/number}",
"pulls_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/pulls{/number}",
"milestones_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/milestones{/number}",
"notifications_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/labels{/name}",
"releases_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/releases{/id}",
"deployments_url": "https://api.github.com/repos/useblocks/sphinxcontrib-needs/deployments",
},
"score": 1.0,
},
],
}
| 63.091331
| 147
| 0.602473
| 4,174
| 40,757
| 5.774317
| 0.066363
| 0.103228
| 0.124139
| 0.191851
| 0.965439
| 0.965439
| 0.965439
| 0.965439
| 0.965439
| 0.960958
| 0
| 0.048818
| 0.23304
| 40,757
| 645
| 148
| 63.189147
| 0.722224
| 0.005373
| 0
| 0.828616
| 0
| 0.061321
| 0.677909
| 0.043102
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ac524a22f771c032e06d218afb1ec9820357158c
| 2,730
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowUtdEngineStandardStatistics/cli/equal/golden_output_expected.py
|
nielsvanhooy/genieparser
|
9a1955749697a6777ca614f0af4d5f3a2c254ccd
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowUtdEngineStandardStatistics/cli/equal/golden_output_expected.py
|
nielsvanhooy/genieparser
|
9a1955749697a6777ca614f0af4d5f3a2c254ccd
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowUtdEngineStandardStatistics/cli/equal/golden_output_expected.py
|
nielsvanhooy/genieparser
|
9a1955749697a6777ca614f0af4d5f3a2c254ccd
|
[
"Apache-2.0"
] | null | null | null |
expected_output = {
'engine_number': {
'1': {
'memry_usage_summary': {
'total_non_map_byts': 24719360,
'byts_in_mapped_regions': 437760000,
'total_alloc_space': 23876960,
'total_free_space': 842400,
'topmost_reuse_blk': 45920
},
'pkt_inp_out_totals': {
'received': 0,
'analyzed': 0,
'dropped': 0,
'filtered': 0,
'outstanding': 0,
'injected': 0
},
'breakdown_by_protocol': {
'eth': 0,
'vlan': 0,
'ip4': 0,
'frag': 0,
'icmp': 0,
'udp': 0,
'tcp': 0,
'ip6': 0,
'ip6_ext': 0,
'ip6_opts': 0,
'frag6': 0,
'icmp6': 0,
'udp6': 0,
'tcp6': 0,
'teredo': 0,
'icmp_ip': 0,
'ip4_ip4': 0,
'ip4_ip6': 0,
'ip6_ip4': 0,
'ip6_ip6': 0,
'gre': 0,
'gre_eth': 0,
'gre_vlan': 0,
'gre_ip4': 0,
'gre_ip6': 0,
'gre_ip6_ext': 0,
'gre_pptp': 0,
'gre_arp': 0,
'gre_ipx': 0,
'gre_loop': 0,
'mpls': 0,
'arp': 0,
'ipx': 0,
'eth_loop': 0,
'eth_disc': 0,
'ip6_disc': 0,
'tcp_disc': 0,
'udp_disc': 0,
'icmp_disc': 0,
'all_discard': 0,
'other': 0
},
'action_stats': {
'bad_chk_sum': 0,
'bad_ttl': 0,
's5_g_1': 0,
's5_g_2': 0,
'total': 0,
'alerts': 0,
'logged': 0,
'passed': 0
}
},
'2': {
'memry_usage_summary': {
'total_non_map_byts': 24719360,
'byts_in_mapped_regions': 437760000,
'total_alloc_space': 23876960,
'total_free_space': 842400,
'topmost_reuse_blk': 45920
},
'pkt_inp_out_totals': {
'received': 0,
'analyzed': 0,
'dropped': 0,
'filtered': 0,
'outstanding': 0,
'injected': 0
},
'breakdown_by_protocol': {
'eth': 0,
'vlan': 0,
'ip4': 0,
'frag': 0,
'icmp': 0,
'udp': 0,
'tcp': 0,
'ip6': 0,
'ip6_ext': 0,
'ip6_opts': 0,
'frag6': 0,
'icmp6': 0,
'udp6': 0,
'tcp6': 0,
'teredo': 0,
'icmp_ip': 0,
'ip4_ip4': 0,
'ip4_ip6': 0,
'ip6_ip4': 0,
'ip6_ip6': 0,
'gre': 0,
'gre_eth': 0,
'gre_vlan': 0,
'gre_ip4': 0,
'gre_ip6': 0,
'gre_ip6_ext': 0,
'gre_pptp': 0,
'gre_arp': 0,
'gre_ipx': 0,
'gre_loop': 0,
'mpls': 0,
'arp': 0,
'ipx': 0,
'eth_loop': 0,
'eth_disc': 0,
'ip6_disc': 0,
'tcp_disc': 0,
'udp_disc': 0,
'icmp_disc': 0,
'all_discard': 0,
'other': 0
},
'action_stats': {
'bad_chk_sum': 0,
'bad_ttl': 0,
's5_g_1': 0,
's5_g_2': 0,
'total': 0,
'alerts': 0,
'logged': 0,
'passed': 0
}
}
}
}
| 18.958333
| 41
| 0.452747
| 356
| 2,730
| 3.179775
| 0.219101
| 0.070671
| 0.024735
| 0.038869
| 0.975265
| 0.975265
| 0.975265
| 0.975265
| 0.975265
| 0.975265
| 0
| 0.130852
| 0.350549
| 2,730
| 144
| 42
| 18.958333
| 0.507614
| 0
| 0
| 0.888889
| 0
| 0
| 0.401468
| 0.03323
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.013889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac74d13729fb1a2e1e6a17c9f2db3316365e433f
| 172
|
py
|
Python
|
tidepool_data_science_metrics/__init__.py
|
tidepool-org/data-science-metrics
|
8340b5cfa6b1c4e844c140d90a6262b8c7ac6216
|
[
"BSD-2-Clause"
] | null | null | null |
tidepool_data_science_metrics/__init__.py
|
tidepool-org/data-science-metrics
|
8340b5cfa6b1c4e844c140d90a6262b8c7ac6216
|
[
"BSD-2-Clause"
] | 1
|
2020-06-02T14:22:47.000Z
|
2020-06-02T14:22:47.000Z
|
tidepool_data_science_metrics/__init__.py
|
tidepool-org/data-science-metrics
|
8340b5cfa6b1c4e844c140d90a6262b8c7ac6216
|
[
"BSD-2-Clause"
] | null | null | null |
from tidepool_data_science_metrics.glucose import glucose
from tidepool_data_science_metrics.common import common
from tidepool_data_science_metrics.insulin import insulin
| 43
| 57
| 0.912791
| 24
| 172
| 6.166667
| 0.375
| 0.243243
| 0.324324
| 0.466216
| 0.608108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 172
| 3
| 58
| 57.333333
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3bbbadadfbf0e2e8dd53973dfbbf790091d276f7
| 585
|
py
|
Python
|
test_examples/templates.py
|
LLYX/fastapi-mail
|
e580541d6e37da74056e6703a701ffaa8590c6ca
|
[
"MIT"
] | null | null | null |
test_examples/templates.py
|
LLYX/fastapi-mail
|
e580541d6e37da74056e6703a701ffaa8590c6ca
|
[
"MIT"
] | null | null | null |
test_examples/templates.py
|
LLYX/fastapi-mail
|
e580541d6e37da74056e6703a701ffaa8590c6ca
|
[
"MIT"
] | 1
|
2022-03-09T08:18:16.000Z
|
2022-03-09T08:18:16.000Z
|
html = """
<html>
<body>
<p>Hi This test mail,
<br>Thanks for using Fastapi-mail</p>
<p> Feel free to <strong>let us</strong> know in case of bug</p>
</body>
</html>
"""
template = """
<html>
<body>
<p>Hi This test mail using BackgroundTask,
<br>Thanks for using Fastapi-mail</p>
<p> Feel free to <strong>let us</strong> know in case of bug</p>
</body>
</html>
"""
bulkmail = """
<html>
<body>
<p>Hi, this Bulk mail using BackgroundTask,
<br>Thanks for using Fastapi-mail</p>
<p> Feel free to <strong>let us</strong> know in case of bug</p>
</body>
</html>
"""
| 15
| 65
| 0.62735
| 97
| 585
| 3.783505
| 0.278351
| 0.065395
| 0.073569
| 0.089918
| 0.934605
| 0.893733
| 0.893733
| 0.779292
| 0.779292
| 0.779292
| 0
| 0
| 0.186325
| 585
| 39
| 66
| 15
| 0.771008
| 0
| 0
| 0.777778
| 0
| 0.111111
| 0.901893
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3bc865bf95fe54d61cdae1c5875ff3bfe909defd
| 557
|
py
|
Python
|
tests/users/test_create_user.py
|
carbonariy/dvhb-hybrid
|
adbb250767ea255addc607fb6f6755c9add447db
|
[
"MIT"
] | 27
|
2018-05-08T16:03:24.000Z
|
2020-02-20T06:39:19.000Z
|
tests/users/test_create_user.py
|
carbonariy/dvhb-hybrid
|
adbb250767ea255addc607fb6f6755c9add447db
|
[
"MIT"
] | 7
|
2018-10-20T16:03:36.000Z
|
2021-11-03T11:09:22.000Z
|
tests/users/test_create_user.py
|
carbonariy/dvhb-hybrid
|
adbb250767ea255addc607fb6f6755c9add447db
|
[
"MIT"
] | 16
|
2018-12-11T15:34:22.000Z
|
2022-01-25T00:20:55.000Z
|
import pytest
@pytest.mark.django_db
async def test_create_user_empty_data(create_user_request):
await create_user_request({}, expected_status=400)
@pytest.mark.django_db
async def test_create_user_successful_default_lang_code(create_user_request, new_user_data):
await create_user_request(new_user_data, expected_status=200)
@pytest.mark.django_db
async def test_create_user_successful_with_lang_code(create_user_request, new_user_data):
new_user_data['lang_code'] = 'fr'
await create_user_request(new_user_data, expected_status=200)
| 30.944444
| 92
| 0.836625
| 87
| 557
| 4.83908
| 0.287356
| 0.213777
| 0.24228
| 0.190024
| 0.741093
| 0.741093
| 0.741093
| 0.741093
| 0.570071
| 0.475059
| 0
| 0.017751
| 0.089767
| 557
| 17
| 93
| 32.764706
| 0.812623
| 0
| 0
| 0.454545
| 0
| 0
| 0.019749
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3bde3ac8aa024de6ffd2f4e669e7604faebd4d57
| 110
|
py
|
Python
|
diffhod/distributions/__init__.py
|
DifferentiableUniverseInitiative/DHOD
|
ad36dbf22b3aab1b4d0b7327f90c29639239b145
|
[
"MIT"
] | 6
|
2020-03-26T17:16:22.000Z
|
2021-08-19T21:39:16.000Z
|
diffhod/distributions/__init__.py
|
DifferentiableUniverseInitiative/DHOD
|
ad36dbf22b3aab1b4d0b7327f90c29639239b145
|
[
"MIT"
] | 23
|
2019-11-12T23:49:31.000Z
|
2021-08-06T16:53:35.000Z
|
diffhod/distributions/__init__.py
|
DifferentiableUniverseInitiative/DHOD
|
ad36dbf22b3aab1b4d0b7327f90c29639239b145
|
[
"MIT"
] | 1
|
2019-12-02T00:52:37.000Z
|
2019-12-02T00:52:37.000Z
|
from diffhod.distributions.NFW import NFW
from diffhod.distributions.RelaxedBernoulli import RelaxedBernoulli
| 36.666667
| 67
| 0.890909
| 12
| 110
| 8.166667
| 0.5
| 0.22449
| 0.489796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 110
| 2
| 68
| 55
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
026eb3405b918de50a23147c9b490cbc9b871e09
| 153
|
py
|
Python
|
rat-sql-gap/seq2struct/models/spider/__init__.py
|
SSamDav/gap-text2sql
|
618df3e82f66bf64c9e8220e81d46324f5144bcf
|
[
"Apache-2.0"
] | 280
|
2020-07-05T08:29:22.000Z
|
2022-03-24T09:11:31.000Z
|
rat-sql-gap/seq2struct/models/spider/__init__.py
|
alan-ai-learner/gap-text2sql
|
c90d4e039123db9c57568d1a005b19e6d35df5ea
|
[
"Apache-2.0"
] | 62
|
2020-07-09T00:26:59.000Z
|
2022-03-22T20:57:26.000Z
|
rat-sql-gap/seq2struct/models/spider/__init__.py
|
alan-ai-learner/gap-text2sql
|
c90d4e039123db9c57568d1a005b19e6d35df5ea
|
[
"Apache-2.0"
] | 105
|
2020-07-05T07:11:48.000Z
|
2022-03-30T06:53:46.000Z
|
from . import spider_dec_func
from . import spider_beam_search
from . import spider_enc_modules
from . import spider_enc
from . import spider_match_utils
| 30.6
| 32
| 0.843137
| 24
| 153
| 5
| 0.458333
| 0.416667
| 0.666667
| 0.316667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124183
| 153
| 5
| 33
| 30.6
| 0.895522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
028338268001988324547c2b398805def5a8de0a
| 8,628
|
py
|
Python
|
python3/cross_sections.py
|
tkent198/hydraulic_wetro
|
780ae12ab87e88c44ac54f62ec468d3976d0a4b0
|
[
"MIT"
] | null | null | null |
python3/cross_sections.py
|
tkent198/hydraulic_wetro
|
780ae12ab87e88c44ac54f62ec468d3976d0a4b0
|
[
"MIT"
] | null | null | null |
python3/cross_sections.py
|
tkent198/hydraulic_wetro
|
780ae12ab87e88c44ac54f62ec468d3976d0a4b0
|
[
"MIT"
] | null | null | null |
##################################################################
# CHANNEL CROSS-SECTIONS ETC
##################################################################
import numpy as np
##################################################################
def xsec_Ahs(h,s,config):
'''
# function computes cross-section area A, wetted perimeter Wp, radius Rh
# for channel geometry as a function of depth h and along-channel position s
# Input:
# (1) depth h
# (2) coord s
# (3) config
# Output:
# (1) area A
# (2) wetted perimeter Wp
# (3) hydraulic radius
'''
if (s >= config.LR1) & (s <= config.LR2): # city region
if (h < config.hc): # in rect channel
area = h*config.wr
Wp = config.wr + 2*h
else: # > hc in flood
area = h*(config.wr + 2*config.wc) - 2*config.wc*config.hc
Wp = config.wr + 2*config.wc + 2*h
elif (s > config.LR11) & (s < config.LR1): # transition from floodplain to city
w = (s-config.LR11)/(config.LR1 - config.LR11) #linear
w = 0.5*(1 + np.tanh(config.tr*(s - 0.5*(config.LR11+config.LR1)))) #smooth
hrs = w*config.hc + (1-w)*config.hr
hfs = config.hc - hrs
tanas = hfs/config.wf
if (h < hrs): # in rect channel
area = h*config.wr
Wp = config.wr + 2*h
elif (h > hrs + hfs): #above slope
area = h*(config.wr + config.wf) - config.wf*(hrs + 0.5*hfs)
Wp = config.wr + 2*h - hfs + np.sqrt(hfs**2 + config.wf**2)
else: # middle sloped region
area = h*config.wr + 0.5*(h - hrs)**2/tanas
Wp = h + config.wr + hrs + (h - hrs)*np.sqrt(1 + tanas**-2)
elif (s > config.LR2) & (s < config.LR22): # transition to floodplain from city
w = (s-config.LR2)/(config.LR22 - config.LR2) #linear
w = 0.5*(1 + np.tanh(config.tr*(s - 0.5*(config.LR11+config.LR1)))) #smooth
hrs = w*config.hr + (1-w)*config.hc
hfs = config.hc - hrs
tanas = hfs/config.wf
if (h < hrs): # in rect channel
area = h*config.wr
Wp = config.wr + 2*h
elif (h > hrs + hfs): #above slope
area = h*(config.wr + config.wf) - config.wf*(hrs + 0.5*hfs)
Wp = config.wr + 2*h - hfs + np.sqrt(hfs**2 + config.wf**2)
else: # middle sloped region
area = h*config.wr + 0.5*(h - hrs)**2/tanas
Wp = h + config.wr + hrs + (h - hrs)*np.sqrt(1 + tanas**-2)
else: # floodplain
if (h < config.hr): # in rect channel
area = h*config.wr
Wp = config.wr + 2*h
elif (h > config.hr + config.hf): #above slope
area = h*(config.wr + config.wf) - config.wf*(config.hr + 0.5*config.hf)
Wp = config.wr + 2*h - config.hf + np.sqrt(config.hf**2 + config.wf**2)
else: # middle sloped region
area = h*config.wr + 0.5*(h - config.hr)**2/config.tana
Wp = h + config.wr + config.hr + (h - config.hr)*np.sqrt(1 + config.tana**-2)
Rh = area/Wp
return area, Wp, Rh
##################################################################
def xsec_hAs(A,s,config):
'''
# function computes depth h, and derivative dh/dA
# for channel geometry as a function of area A and along-channel position s
# Input:
# (1) area A
# (2) coord s
# (3) config file
# Output:
# (1) depth h
# (2) derivative dhdA
'''
# critical areas
A1 = config.wr*config.hr # threshold area river
A2 = (config.hr+config.hf)*(config.wr+config.wf)-config.wf*(config.hr+0.5*config.hf) # 2nd threshold area river
Ac = config.wr*config.hc # threshold area city
if (s > config.LR1) & (s < config.LR2): # city region
if (A < Ac): # in rect channel
h = A/config.wr
dhdA = 1/config.wr
else: # > Ac in flood
h = (A + 2*config.wc*config.hc)/(config.wr + 2*config.wc)
dhdA = 1/(config.wr + 2*config.wc)
elif (s > config.LR11) & (s < config.LR1): # transition from floodplain to city
w = (s-config.LR11)/(config.LR1 - config.LR11)
hrs = w*config.hc + (1-w)*config.hr
hfs = config.hc - hrs
tanas = hfs/config.wf
A1 = config.wr*hrs # threshold area river
A2 = (hrs+hfs)*(config.wr+config.wf)-config.wf*(hrs+0.5*hfs) # 2nd threshold area river
if (A < A1): # in rect channel
h = A/config.wr
dhdA = 1/config.wr
elif (A > A2): #above slope
h = (A + config.wf*(hrs + 0.5*hfs))/(config.wr + config.wf)
dhdA = 1/(config.wr + config.wf)
else: # middle sloped region
h = hrs - config.wr*tanas + np.sqrt(tanas**2*config.wr**2 + 2*(A - config.wr*hrs)*tanas)
dhdA = tanas/np.sqrt(tanas**2*config.wr**2 + 2*(A - config.wr*hrs)*tanas)
elif (s > config.LR2) & (s < config.LR22): # transition from city to floodplain
w = (s-config.LR2)/(config.LR22 - config.LR2)
hrs = w*config.hr + (1-w)*config.hc
hfs = config.hc - hrs
tanas = hfs/config.wf
A1 = config.wr*hrs # threshold area river
A2 = (hrs+hfs)*(config.wr+config.wf)-config.wf*(hrs+0.5*hfs) # 2nd threshold area river
if (A < A1): # in rect channel
h = A/config.wr
dhdA = 1/config.wr
elif (A > A2): #above slope
h = (A + config.wf*(hrs + 0.5*hfs))/(config.wr + config.wf)
dhdA = 1/(config.wr + config.wf)
else: # middle sloped region
h = hrs - config.wr*tanas + np.sqrt(tanas**2*config.wr**2 + 2*(A - config.wr*hrs)*tanas)
dhdA = tanas/np.sqrt(tanas**2*config.wr**2 + 2*(A - config.wr*hrs)*tanas)
else: # floodplain
if (A < A1): # in rect channel
h = A/config.wr
dhdA = 1/config.wr
elif (A > A2): #above slope
h = (A + config.wf*(config.hr + 0.5*config.hf))/(config.wr + config.wf)
dhdA = 1/(config.wr + config.wf)
else: # middle sloped region
h = config.hr - config.wr*config.tana + np.sqrt(config.tana**2*config.wr**2 + 2*(A - config.wr*config.hr)*config.tana)
# dhdA = sqrt(config.tana/(2*A)); WRONG!!!
dhdA = config.tana/np.sqrt(config.tana**2*config.wr**2 + 2*(A - config.wr*config.hr)*config.tana)
return h, dhdA
##################################################################
def plot_xsec_hAs(A,s,config):
'''
# function computes coords for plotting plots of water depth h
# in cross section A at location s: h = h(A(s,t),s)
# Input:
# (1) area A
# (2) coord s
# (3) config parameters
# Output: [X,Y,Xc,Yc,h]
'''
# critical areas
A1 = config.wr*config.hr # threshold area river
A2 = (config.hr+config.hf)*(config.wr+config.wf)-config.wf*(config.hr+0.5*config.hf) # 2nd threshold area river
Ac = config.wr*config.hc # threshold area city
if (s > config.LR1) & (s < config.LR2): # city region
Xc = [-config.wc,-config.wc, 0, 0, config.wr, config.wr, config.wr+config.wc, config.wr+config.wc]
Yc = [config.hc+config.hc, config.hc, config.hc, 0, 0, config.hc, config.hc, config.hc+config.hc]
if (A < Ac): # in rect channel
h = A/config.wr
X = [0,0,config.wr,config.wr]
Y = [h,0,0,h]
else: # > Ac in flood
h = (A + 2*config.wc*config.hc)/(config.wr + 2*config.wc)
X = [-config.wc,-config.wc, 0, 0, config.wr, config.wr, config.wr+config.wc, config.wr+config.wc]
Y = [h, config.hc, config.hc, 0, 0, config.hc, config.hc, h]
else: # floodplain
Xc = [0, 0, config.wr, config.wr, config.wr+config.wf, config.wr+config.wf]
Yc = [config.hc+config.hc,0 ,0 ,config.hr, config.hr+config.hf, config.hc+config.hc]
if (A < A1): # in rect channel
h = A/config.wr
X = [0,0,config.wr,config.wr]
Y = [h,0,0,h]
elif (A > A2): #above slope
h = (A + config.wf*(config.hr + 0.5*config.hf))/(config.wr + config.wf)
X = [0, 0, config.wr, config.wr, config.wr+config.wf, config.wr+config.wf]
Y = [h,0 ,0 ,config.hr, config.hr+config.hf, h]
else: # middle sloped region
h = config.hr - config.wr*config.tana + np.sqrt(config.tana**2*config.wr**2 + 2*(A - config.wr*config.hr)*config.tana)
X = [0, 0, config.wr, config.wr, config.wr+(h-config.hr)/config.tana]
Y = [h,0,0,config.hr,h]
return X,Y,Xc,Yc,h
| 36.871795
| 130
| 0.518197
| 1,303
| 8,628
| 3.428243
| 0.080583
| 0.166555
| 0.1379
| 0.064473
| 0.854041
| 0.836803
| 0.822028
| 0.778822
| 0.730916
| 0.724647
| 0
| 0.033675
| 0.287552
| 8,628
| 233
| 131
| 37.030043
| 0.693021
| 0.19599
| 0
| 0.779412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022059
| false
| 0
| 0.007353
| 0
| 0.051471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5a02fa6f7a0cbacba1f4a0c80e53d2b31786f440
| 85,818
|
py
|
Python
|
test/search_api/test_search_api_query_filter_factory.py
|
antolinos/datagateway-api
|
c6aa15ea01545f7a8e58e656c569523c60f7e4ef
|
[
"Apache-2.0"
] | null | null | null |
test/search_api/test_search_api_query_filter_factory.py
|
antolinos/datagateway-api
|
c6aa15ea01545f7a8e58e656c569523c60f7e4ef
|
[
"Apache-2.0"
] | null | null | null |
test/search_api/test_search_api_query_filter_factory.py
|
antolinos/datagateway-api
|
c6aa15ea01545f7a8e58e656c569523c60f7e4ef
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from datagateway_api.src.common.exceptions import FilterError, SearchAPIError
from datagateway_api.src.search_api.filters import (
SearchAPIIncludeFilter,
SearchAPILimitFilter,
SearchAPISkipFilter,
SearchAPIWhereFilter,
)
from datagateway_api.src.search_api.nested_where_filters import NestedWhereFilters
from datagateway_api.src.search_api.query import SearchAPIQuery
from datagateway_api.src.search_api.query_filter_factory import (
SearchAPIQueryFilterFactory,
)
class TestSearchAPIQueryFilterFactory:
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_where",
[
pytest.param(
{"filter": {"where": {"title": "My Title"}}},
"Document",
SearchAPIWhereFilter("title", "My Title", "eq"),
id="Property value with no operator",
),
pytest.param(
{"filter": {"where": {"summary": {"like": "My Test Summary"}}}},
"Document",
SearchAPIWhereFilter("summary", "My Test Summary", "like"),
id="Property value with operator",
),
pytest.param(
{"where": {"summary": {"like": "My Test Summary"}}},
"Document",
SearchAPIWhereFilter("summary", "My Test Summary", "like"),
id="WHERE filter in syntax for count endpoints",
),
],
)
def test_valid_where_filter(
self, test_request_filter, test_entity_name, expected_where,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == 1
assert repr(filters[0]) == repr(expected_where)
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_filters",
[
pytest.param(
{"filter": {"where": {"isPublic": True}}},
"Dataset",
[],
id="Public data",
),
pytest.param(
{"filter": {"where": {"isPublic": {"neq": False}}}},
"Dataset",
[],
id="Public data - neq operator",
),
pytest.param(
{"filter": {"where": {"isPublic": {"eq": False}}}},
"Dataset",
[SearchAPISkipFilter(1), SearchAPILimitFilter(0)],
id="Non-public data",
),
pytest.param(
{"filter": {"where": {"isPublic": {"neq": True}}}},
"Dataset",
[SearchAPISkipFilter(1), SearchAPILimitFilter(0)],
id="Non-public data - neq operator",
),
],
)
def test_valid_where_filter_on_is_public_field(
self, test_request_filter, test_entity_name, expected_filters,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == len(expected_filters)
for test_filter in filters:
if isinstance(test_filter, SearchAPISkipFilter):
assert test_filter.skip_value == 1
if isinstance(test_filter, SearchAPILimitFilter):
assert test_filter.limit_value == 0
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_lhs, expected_rhs"
", expected_joining_operator",
[
pytest.param(
{"filter": {"where": {"text": "Dataset 1"}}},
"Dataset",
[],
[SearchAPIWhereFilter("title", "Dataset 1", "like")],
"or",
id="Text operator on dataset",
),
pytest.param(
{"filter": {"where": {"text": "Instrument 1"}}},
"Instrument",
[SearchAPIWhereFilter("name", "Instrument 1", "like")],
[SearchAPIWhereFilter("facility", "Instrument 1", "like")],
"or",
id="Text operator on instrument",
),
],
)
def test_valid_where_filter_text_operator(
self,
test_request_filter,
test_entity_name,
expected_lhs,
expected_rhs,
expected_joining_operator,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == 1
assert isinstance(filters[0], NestedWhereFilters)
assert repr(filters[0].lhs) == repr(expected_lhs)
assert repr(filters[0].rhs) == repr(expected_rhs)
assert filters[0].joining_operator == expected_joining_operator
assert repr(filters[0].search_api_query) == repr(
SearchAPIQuery(test_entity_name),
)
@pytest.mark.parametrize(
"test_request_filter, test_entity_name",
[
pytest.param(
{"filter": {"where": {"text": "Instrument 1"}}},
"UnknownEntity",
id="Unknown entity",
),
],
)
def test_invalid_where_filter_text_operator(
self, test_request_filter, test_entity_name,
):
with pytest.raises(SearchAPIError):
SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_lhs, expected_rhs"
", expected_joining_operator",
[
pytest.param(
{"filter": {"where": {"and": [{"summary": "My Test Summary"}]}}},
"Document",
[],
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
"and",
id="Single condition, property value with no operator",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{"summary": "My Test Summary"},
{"title": "Test title"},
],
},
},
},
"Document",
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "eq")],
"and",
id="Multiple conditions (two), property values with no operator",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{"summary": "My Test Summary"},
{"title": "Test title"},
{"type": "Test type"},
],
},
},
},
"Document",
[
SearchAPIWhereFilter("summary", "My Test Summary", "eq"),
SearchAPIWhereFilter("title", "Test title", "eq"),
],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"and",
id="Multiple conditions (three), property values with no operator",
),
pytest.param(
{"filter": {"where": {"and": [{"size": {"lt": 50}}]}}},
"File",
[],
[SearchAPIWhereFilter("size", 50, "lt")],
"and",
id="Single condition, property value with operator",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{"role": {"like": "Test role"}},
{"size": {"gte": 275}},
],
},
},
},
"Member",
[SearchAPIWhereFilter("role", "Test role", "like")],
[SearchAPIWhereFilter("size", 275, "gte")],
"and",
id="Multiple conditions (two), property values with operator",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{"name": {"like": "Test name"}},
{"size": {"gte": 275}},
{"path": {"nlike": "Test path"}},
],
},
},
},
"File",
[
SearchAPIWhereFilter("name", "Test name", "like"),
SearchAPIWhereFilter("size", 275, "gte"),
],
[SearchAPIWhereFilter("path", "Test path", "nlike")],
"and",
id="Multiple conditions (three), property values with operator",
),
pytest.param(
{"filter": {"where": {"and": [{"text": "Dataset 1"}]}}},
"Dataset",
[],
[
NestedWhereFilters(
[], SearchAPIWhereFilter("title", "Dataset 1", "like"), "or",
),
],
"and",
id="Single condition, text operator on dataset",
),
pytest.param(
{"filter": {"where": {"and": [{"text": "Instrument 1"}]}}},
"Instrument",
[],
[
NestedWhereFilters(
[SearchAPIWhereFilter("name", "Instrument 1", "like")],
[SearchAPIWhereFilter("facility", "Instrument 1", "like")],
"or",
),
],
"and",
id="Single condition, text operator on instrument",
),
pytest.param(
{
"filter": {
"where": {"and": [{"text": "Dataset 1"}, {"pid": "Test pid"}]},
},
},
"Dataset",
[
NestedWhereFilters(
[], [SearchAPIWhereFilter("title", "Dataset 1", "like")], "or",
),
],
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
"and",
id="Multiple conditions (two), text operator on dataset and "
"property value with no operator",
),
pytest.param(
{
"filter": {
"where": {
"and": [{"text": "Instrument 1"}, {"pid": "Test pid"}],
},
},
},
"Instrument",
[
NestedWhereFilters(
[SearchAPIWhereFilter("name", "Instrument 1", "like")],
[SearchAPIWhereFilter("facility", "Instrument 1", "like")],
"or",
),
],
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
"and",
id="Multiple conditions (two), text operator on instrument and "
"property value with no operator",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{"text": "Dataset 1"},
{"pid": {"eq": "Test pid"}},
],
},
},
},
"Dataset",
[
NestedWhereFilters(
[], [SearchAPIWhereFilter("title", "Dataset 1", "like")], "or",
),
],
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
"and",
id="Multiple conditions (two), text operator on dataset and "
"property value with operator",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{"text": "Instrument 1"},
{"pid": {"eq": "Test pid"}},
],
},
},
},
"Instrument",
[
NestedWhereFilters(
[SearchAPIWhereFilter("name", "Instrument 1", "like")],
[SearchAPIWhereFilter("facility", "Instrument 1", "like")],
"or",
),
],
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
"and",
id="Multiple conditions (two), text operator on instrument and "
"property value with operator",
),
],
)
def test_valid_where_filter_with_and_boolean_operator(
self,
test_request_filter,
test_entity_name,
expected_lhs,
expected_rhs,
expected_joining_operator,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == 1
assert isinstance(filters[0], NestedWhereFilters)
assert repr(filters[0].lhs) == repr(expected_lhs)
assert repr(filters[0].rhs) == repr(expected_rhs)
assert filters[0].joining_operator == expected_joining_operator
assert repr(filters[0].search_api_query) == repr(
SearchAPIQuery(test_entity_name),
)
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_lhs, expected_rhs"
", expected_joining_operator",
[
pytest.param(
{"filter": {"where": {"or": [{"summary": "My Test Summary"}]}}},
"Document",
[],
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
"or",
id="Single condition, property value with no operator",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{"summary": "My Test Summary"},
{"title": "Test title"},
],
},
},
},
"Document",
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "eq")],
"or",
id="Multiple conditions (two), property values with no operator",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{"summary": "My Test Summary"},
{"title": "Test title"},
{"type": "Test type"},
],
},
},
},
"Document",
[
SearchAPIWhereFilter("summary", "My Test Summary", "eq"),
SearchAPIWhereFilter("title", "Test title", "eq"),
],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"or",
id="Multiple conditions (three), property values with no operator",
),
pytest.param(
{"filter": {"where": {"or": [{"size": {"lt": 50}}]}}},
"File",
[],
[SearchAPIWhereFilter("size", 50, "lt")],
"or",
id="Single condition, property value with operator",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{"name": {"like": "Test name"}},
{"size": {"gte": 275}},
],
},
},
},
"File",
[SearchAPIWhereFilter("name", "Test name", "like")],
[SearchAPIWhereFilter("size", 275, "gte")],
"or",
id="Multiple conditions (two), property values with operator",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{"name": {"like": "Test name"}},
{"size": {"gte": 275}},
{"path": {"nlike": "Test path"}},
],
},
},
},
"File",
[
SearchAPIWhereFilter("name", "Test name", "like"),
SearchAPIWhereFilter("size", 275, "gte"),
],
[SearchAPIWhereFilter("path", "Test path", "nlike")],
"or",
id="Multiple conditions (three), property values with operator",
),
pytest.param(
{"filter": {"where": {"or": [{"text": "Dataset 1"}]}}},
"Dataset",
[],
[
NestedWhereFilters(
[], SearchAPIWhereFilter("title", "Dataset 1", "like"), "or",
),
],
"or",
id="Single condition, text operator on dataset",
),
pytest.param(
{"filter": {"where": {"or": [{"text": "Instrument 1"}]}}},
"Instrument",
[],
[
NestedWhereFilters(
[SearchAPIWhereFilter("name", "Instrument 1", "like")],
[SearchAPIWhereFilter("facility", "Instrument 1", "like")],
"or",
),
],
"or",
id="Single condition, text operator on instrument",
),
pytest.param(
{
"filter": {
"where": {"or": [{"text": "Dataset 1"}, {"pid": "Test pid"}]},
},
},
"Dataset",
[
NestedWhereFilters(
[], [SearchAPIWhereFilter("title", "Dataset 1", "like")], "or",
),
],
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
"or",
id="Multiple conditions (two), text operator on dataset and "
"property value with no operator",
),
pytest.param(
{
"filter": {
"where": {
"or": [{"text": "Instrument 1"}, {"pid": "Test pid"}],
},
},
},
"Instrument",
[
NestedWhereFilters(
[SearchAPIWhereFilter("name", "Instrument 1", "like")],
[SearchAPIWhereFilter("facility", "Instrument 1", "like")],
"or",
),
],
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
"or",
id="Multiple conditions (two), text operator on instrument and "
"property value with no operator",
),
pytest.param(
{
"filter": {
"where": {
"or": [{"text": "Dataset 1"}, {"pid": {"eq": "Test pid"}}],
},
},
},
"Dataset",
[
NestedWhereFilters(
[], [SearchAPIWhereFilter("title", "Dataset 1", "like")], "or",
),
],
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
"or",
id="Multiple conditions (two), text operator on dataset and "
"property value with operator",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{"text": "Instrument 1"},
{"pid": {"eq": "Test pid"}},
],
},
},
},
"Instrument",
[
NestedWhereFilters(
[SearchAPIWhereFilter("name", "Instrument 1", "like")],
[SearchAPIWhereFilter("facility", "Instrument 1", "like")],
"or",
),
],
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
"or",
id="Multiple conditions (two), text operator on instrument and "
"property value with operator",
),
],
)
def test_valid_where_filter_with_or_boolean_operator(
self,
test_request_filter,
test_entity_name,
expected_lhs,
expected_rhs,
expected_joining_operator,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == 1
assert isinstance(filters[0], NestedWhereFilters)
assert repr(filters[0].lhs) == repr(expected_lhs)
assert repr(filters[0].rhs) == repr(expected_rhs)
assert filters[0].joining_operator == expected_joining_operator
assert repr(filters[0].search_api_query) == repr(
SearchAPIQuery(test_entity_name),
)
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_lhs, expected_rhs"
", expected_joining_operator",
[
pytest.param(
{
"filter": {
"where": {
"and": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"and": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"and",
),
],
"and",
id="With two AND boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"or": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"or",
),
],
"and",
id="With AND and OR boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{
"or": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"or": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"or",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"or",
),
],
"and",
id="With two OR boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"and": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"and": [
{"doi": "Test doi"},
{"license": {"like": "Test license"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"and",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("doi", "Test doi", "eq")],
[SearchAPIWhereFilter("license", "Test license", "like")],
"and",
),
],
"and",
id="With three AND boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"and": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"or": [
{"doi": "Test doi"},
{"license": {"like": "Test license"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"and",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("doi", "Test doi", "eq")],
[SearchAPIWhereFilter("license", "Test license", "like")],
"or",
),
],
"and",
id="With two AND and one OR boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"or": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"or": [
{"doi": "Test doi"},
{"license": {"like": "Test license"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"or",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("doi", "Test doi", "eq")],
[SearchAPIWhereFilter("license", "Test license", "like")],
"or",
),
],
"and",
id="With one AND and two OR boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{
"or": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"or": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"or": [
{"doi": "Test doi"},
{"license": {"like": "Test license"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"or",
),
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"or",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("doi", "Test doi", "eq")],
[SearchAPIWhereFilter("license", "Test license", "like")],
"or",
),
],
"and",
id="With three OR boolean operators",
),
],
)
def test_valid_where_filter_with_nested_and_boolean_operator(
self,
test_request_filter,
test_entity_name,
expected_lhs,
expected_rhs,
expected_joining_operator,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == 1
assert isinstance(filters[0], NestedWhereFilters)
assert repr(filters[0].lhs) == repr(expected_lhs)
assert repr(filters[0].rhs) == repr(expected_rhs)
assert filters[0].joining_operator == expected_joining_operator
assert repr(filters[0].search_api_query) == repr(
SearchAPIQuery(test_entity_name),
)
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_lhs, expected_rhs"
", expected_joining_operator",
[
pytest.param(
{
"filter": {
"where": {
"or": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"and": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"and",
),
],
"or",
id="With two AND boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"or": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"or",
),
],
"or",
id="With AND and OR boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{
"or": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"or": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"or",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"or",
),
],
"or",
id="With two OR boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"and": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"and": [
{"doi": "Test doi"},
{"license": {"like": "Test license"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"and",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("doi", "Test doi", "eq")],
[SearchAPIWhereFilter("license", "Test license", "like")],
"and",
),
],
"or",
id="With three AND boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"and": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"or": [
{"doi": "Test doi"},
{"license": {"like": "Test license"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"and",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("doi", "Test doi", "eq")],
[SearchAPIWhereFilter("license", "Test license", "like")],
"or",
),
],
"or",
id="With two AND and one OR boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"or": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"or": [
{"doi": "Test doi"},
{"license": {"like": "Test license"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"or",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("doi", "Test doi", "eq")],
[SearchAPIWhereFilter("license", "Test license", "like")],
"or",
),
],
"or",
id="With one AND and two OR boolean operators",
),
pytest.param(
{
"filter": {
"where": {
"or": [
{
"or": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"or": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"or": [
{"doi": "Test doi"},
{"license": {"like": "Test license"}},
],
},
],
},
},
},
"Document",
[
NestedWhereFilters(
[SearchAPIWhereFilter("summary", "My Test Summary", "eq")],
[SearchAPIWhereFilter("title", "Test title", "like")],
"or",
),
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"or",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("doi", "Test doi", "eq")],
[SearchAPIWhereFilter("license", "Test license", "like")],
"or",
),
],
"or",
id="With three OR boolean operators",
),
],
)
def test_valid_where_filter_with_nested_or_boolean_operator(
self,
test_request_filter,
test_entity_name,
expected_lhs,
expected_rhs,
expected_joining_operator,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == 1
assert isinstance(filters[0], NestedWhereFilters)
assert repr(filters[0].lhs) == repr(expected_lhs)
assert repr(filters[0].rhs) == repr(expected_rhs)
assert filters[0].joining_operator == expected_joining_operator
assert repr(filters[0].search_api_query) == repr(
SearchAPIQuery(test_entity_name),
)
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_length"
", expected_included_entities",
[
pytest.param(
{"filter": {"include": [{"relation": "files"}]}},
"Dataset",
1,
[["files"]],
id="Single related model",
),
pytest.param(
{
"filter": {
"include": [{"relation": "files"}, {"relation": "instrument"}],
},
},
"Dataset",
1,
[["files", "instrument"]],
id="Multiple related models",
),
],
)
def test_valid_include_filter(
self,
test_request_filter,
test_entity_name,
expected_length,
expected_included_entities,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == expected_length
for test_filter, included_entities in zip(filters, expected_included_entities):
if isinstance(test_filter, SearchAPIIncludeFilter):
assert test_filter.included_filters == included_entities
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_length"
", expected_included_entities, expected_where_filter_data"
", expected_nested_wheres",
[
pytest.param(
{
"filter": {
"include": [
{
"relation": "parameters",
"scope": {"where": {"name": "My parameter"}},
},
],
},
},
"Dataset",
2,
[["parameters"]],
[SearchAPIWhereFilter("parameters.name", "My parameter", "eq")],
"",
id="Property value with no operator",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "parameters",
"scope": {"where": {"name": {"ne": "My parameter"}}},
},
],
},
},
"Dataset",
2,
[["parameters"]],
[SearchAPIWhereFilter("parameters.name", "My parameter", "ne")],
"",
id="Property value with operator",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "files",
"scope": {"where": {"text": "file1"}},
},
],
},
},
"Dataset",
2,
[["files"]],
[],
[
NestedWhereFilters(
[], [SearchAPIWhereFilter("files.name", "file1", "like")], "or",
),
],
id="Text operator on defined field mapping to single field",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "parameters",
"scope": {"where": {"text": "My parameter"}},
},
],
},
},
"Dataset",
1,
[["parameters"]],
[],
[],
id="Text operator on non-defined field",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "documents",
"scope": {"where": {"text": "document1"}},
},
],
},
},
"Dataset",
2,
[["documents"]],
[],
[
NestedWhereFilters(
[SearchAPIWhereFilter("documents.title", "document1", "like")],
[
SearchAPIWhereFilter(
"documents.summary", "document1", "like",
),
],
"or",
),
],
id="Text operator on defined field mapping to multiple field",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "documents",
"scope": {
"where": {
"and": [
{"summary": "My Test Summary"},
{"title": "Test title"},
],
},
},
},
],
},
},
"Dataset",
2,
[["documents"]],
[],
[
NestedWhereFilters(
[
SearchAPIWhereFilter(
"documents.summary", "My Test Summary", "eq",
),
],
[SearchAPIWhereFilter("documents.title", "Test title", "eq")],
"and",
),
],
id="AND boolean operator",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "documents",
"scope": {
"where": {
"or": [
{"summary": "My Test Summary"},
{"title": "Test title"},
],
},
},
},
],
},
},
"Dataset",
2,
[["documents"]],
[],
[
NestedWhereFilters(
[
SearchAPIWhereFilter(
"documents.summary", "My Test Summary", "eq",
),
],
[SearchAPIWhereFilter("documents.title", "Test title", "eq")],
"or",
),
],
id="OR boolean operator",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "documents",
"scope": {
"where": {
"and": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"and": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"or": [
{"doi": "Test doi"},
{
"license": {
"like": "Test license",
},
},
],
},
],
},
},
},
],
},
},
"Dataset",
2,
[["documents"]],
[],
[
NestedWhereFilters(
[
NestedWhereFilters(
[
SearchAPIWhereFilter(
"documents.summary", "My Test Summary", "eq",
),
],
[
SearchAPIWhereFilter(
"documents.title", "Test title", "like",
),
],
"and",
),
NestedWhereFilters(
[
SearchAPIWhereFilter(
"documents.pid", "Test pid", "eq",
),
],
[
SearchAPIWhereFilter(
"documents.type", "Test type", "eq",
),
],
"and",
),
],
[
NestedWhereFilters(
[
SearchAPIWhereFilter(
"documents.doi", "Test doi", "eq",
),
],
[
SearchAPIWhereFilter(
"documents.license", "Test license", "like",
),
],
"or",
),
],
"and",
),
],
id="Nested AND boolean operator",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "documents",
"scope": {
"where": {
"or": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"and": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"or": [
{"doi": "Test doi"},
{
"license": {
"like": "Test license",
},
},
],
},
],
},
},
},
],
},
},
"Dataset",
2,
[["documents"]],
[],
[
NestedWhereFilters(
[
NestedWhereFilters(
[
SearchAPIWhereFilter(
"documents.summary", "My Test Summary", "eq",
),
],
[
SearchAPIWhereFilter(
"documents.title", "Test title", "like",
),
],
"and",
),
NestedWhereFilters(
[
SearchAPIWhereFilter(
"documents.pid", "Test pid", "eq",
),
],
[
SearchAPIWhereFilter(
"documents.type", "Test type", "eq",
),
],
"and",
),
],
[
NestedWhereFilters(
[
SearchAPIWhereFilter(
"documents.doi", "Test doi", "eq",
),
],
[
SearchAPIWhereFilter(
"documents.license", "Test license", "like",
),
],
"or",
),
],
"or",
),
],
id="Nested OR boolean operator",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "parameters",
"scope": {"where": {"name": "My parameter"}},
},
{
"relation": "documents",
"scope": {"where": {"title": "Document title"}},
},
],
},
},
"Dataset",
3,
[["parameters", "documents"]],
[
SearchAPIWhereFilter("parameters.name", "My parameter", "eq"),
SearchAPIWhereFilter("documents.title", "Document title", "eq"),
],
[],
id="Multiple related models",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "datasets",
"scope": {
"where": {"title": "Dataset 1"},
"include": [
{
"relation": "instrument",
"scope": {
"where": {"name": "Instrument 1"},
},
},
],
},
},
],
},
},
"Document",
3,
[["datasets.instrument"]],
[
SearchAPIWhereFilter("datasets.title", "Dataset 1", "eq"),
SearchAPIWhereFilter(
"datasets.instrument.name", "Instrument 1", "eq",
),
],
[],
id="Nested related models",
),
],
)
def test_valid_include_filter_with_where_filter_in_scope(
self,
test_request_filter,
test_entity_name,
expected_length,
expected_included_entities,
expected_where_filter_data,
expected_nested_wheres,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == expected_length
for test_filter in filters:
if isinstance(test_filter, SearchAPIIncludeFilter):
assert test_filter.panosc_entity_name == test_entity_name
for expected_include in expected_included_entities:
assert test_filter.included_filters == expected_include
expected_included_entities.remove(expected_include)
if isinstance(test_filter, NestedWhereFilters):
for expected_nested in expected_nested_wheres:
assert repr(test_filter) == repr(expected_nested)
expected_nested_wheres.remove(expected_nested)
if isinstance(test_filter, SearchAPIWhereFilter):
for expected_where in expected_where_filter_data:
assert repr(test_filter) == repr(expected_where)
expected_where_filter_data.remove(expected_where)
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_length"
", expected_included_entities",
[
pytest.param(
{
"filter": {
"include": [
{
"relation": "datasets",
"scope": {"include": [{"relation": "parameters"}]},
},
],
},
},
"Document",
1,
[["datasets.parameters"]],
id="Single related model",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "datasets",
"scope": {
"include": [
{"relation": "parameters"},
{"relation": "instrument"},
],
},
},
],
},
},
"Document",
1,
[["datasets.parameters", "datasets.instrument"]],
id="Multiple related models",
),
pytest.param(
{
"filter": {
"include": [
{
"relation": "datasets",
"scope": {
"include": [
{
"relation": "documents",
"scope": {
"include": [{"relation": "parameters"}],
},
},
],
},
},
],
},
},
"Instrument",
1,
[["datasets.documents.parameters"]],
id="Nested related models",
),
],
)
def test_valid_include_filter_with_include_filter_in_scope(
self,
test_request_filter,
test_entity_name,
expected_length,
expected_included_entities,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == expected_length
for test_filter in filters:
if isinstance(test_filter, SearchAPIIncludeFilter):
for expected_include in expected_included_entities:
assert test_filter.included_filters == expected_include
expected_included_entities.remove(expected_include)
@pytest.mark.parametrize(
"test_request_filter, expected_limit_value",
[
pytest.param({"filter": {"limit": 0}}, 0, id="Limit 0 values"),
pytest.param({"filter": {"limit": 50}}, 50, id="Limit 50 values"),
],
)
def test_valid_limit_filter(self, test_request_filter, expected_limit_value):
filters = SearchAPIQueryFilterFactory.get_query_filter(test_request_filter)
assert len(filters) == 1
assert isinstance(filters[0], SearchAPILimitFilter)
assert filters[0].limit_value == expected_limit_value
@pytest.mark.parametrize(
"test_request_filter, expected_skip_value",
[
pytest.param({"filter": {"skip": 0}}, 0, id="Skip 0 values"),
pytest.param({"filter": {"skip": 50}}, 50, id="Skip 50 values"),
],
)
def test_valid_skip_filter(
self, test_request_filter, expected_skip_value,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(test_request_filter)
assert len(filters) == 1
assert isinstance(filters[0], SearchAPISkipFilter)
assert filters[0].skip_value == expected_skip_value
@pytest.mark.parametrize(
"test_request_filter, test_entity_name, expected_length"
", expected_included_entities, expected_where_filter_data"
", expected_nested_wheres, expected_limit_values, expected_skip_values",
[
pytest.param(
{
"filter": {
"where": {"title": "My Title"},
"include": [{"relation": "instrument"}],
"limit": 50,
"skip": 20,
},
},
"Dataset",
4,
[["instrument"]],
[SearchAPIWhereFilter("title", "My Title", "eq")],
[],
[50],
[20],
id="Simple case",
),
pytest.param(
{
"filter": {
"where": {
"and": [
{
"and": [
{"summary": "My Test Summary"},
{"title": {"like": "Test title"}},
],
},
{
"and": [
{"pid": "Test pid"},
{"type": {"eq": "Test type"}},
],
},
{
"or": [
{"doi": "Test doi"},
{"license": {"like": "Test license"}},
],
},
],
},
"include": [
{
"relation": "instrument",
"scope": {"where": {"name": "Instrument 1"}},
},
],
"limit": 50,
"skip": 20,
},
},
"Dataset",
5,
[["instrument"]],
[SearchAPIWhereFilter("instrument.name", "Instrument 1", "eq")],
[
NestedWhereFilters(
[
NestedWhereFilters(
[
SearchAPIWhereFilter(
"summary", "My Test Summary", "eq",
),
],
[SearchAPIWhereFilter("title", "Test title", "like")],
"and",
),
NestedWhereFilters(
[SearchAPIWhereFilter("pid", "Test pid", "eq")],
[SearchAPIWhereFilter("type", "Test type", "eq")],
"and",
),
],
[
NestedWhereFilters(
[SearchAPIWhereFilter("doi", "Test doi", "eq")],
[
SearchAPIWhereFilter(
"license", "Test license", "like",
),
],
"or",
),
],
"and",
),
],
[50],
[20],
id="Complex case",
),
],
)
def test_valid_filter_input_with_all_filters(
self,
test_request_filter,
test_entity_name,
expected_length,
expected_included_entities,
expected_where_filter_data,
expected_nested_wheres,
expected_limit_values,
expected_skip_values,
):
filters = SearchAPIQueryFilterFactory.get_query_filter(
test_request_filter, test_entity_name,
)
assert len(filters) == expected_length
for test_filter in filters:
if isinstance(test_filter, SearchAPIIncludeFilter):
for expected_include in expected_included_entities:
assert test_filter.included_filters == expected_include
expected_included_entities.remove(expected_include)
if isinstance(test_filter, NestedWhereFilters):
for expected_nested in expected_nested_wheres:
assert repr(test_filter) == repr(expected_nested)
expected_nested_wheres.remove(expected_nested)
if isinstance(test_filter, SearchAPIWhereFilter):
for expected_where in expected_where_filter_data:
assert repr(test_filter) == repr(expected_where)
expected_where_filter_data.remove(expected_where)
if isinstance(test_filter, SearchAPILimitFilter):
for expected_limit in expected_limit_values:
assert test_filter.limit_value == expected_limit
expected_limit_values.remove(expected_limit)
if isinstance(test_filter, SearchAPISkipFilter):
for expected_skip in expected_skip_values:
assert test_filter.skip_value == expected_skip
expected_skip_values.remove(expected_skip)
@pytest.mark.parametrize(
"test_request_filter",
[
pytest.param("invalid query filter input", id="Generally invalid input"),
pytest.param({"filter": {"test": "value"}}, id="Invalid filter name"),
pytest.param(
{
"filter": {
"include": [
{
"relation": "parameters",
"scope": {"text": "My parameter"},
},
],
},
},
id="Invalid scope syntax on include filter",
),
pytest.param(
{
"filter": {
"include": [
{"relation": "parameters", "scope": {"limit": 50}},
],
},
},
id="Unsupported limit filter in scope of include filter",
),
pytest.param(
{
"filter": {
"include": [{"relation": "parameters", "scope": {"skip": 20}}],
},
},
id="Unsupported skip filter in scope of include filter",
),
pytest.param(
{"filter": {"where": {"isPublic": {"lt": True}}}},
id="Unsupported operator in where filter with boolean value",
),
],
)
def test_invalid_filter_input(self, test_request_filter):
with pytest.raises(FilterError):
SearchAPIQueryFilterFactory.get_query_filter(test_request_filter)
@pytest.mark.parametrize(
"filter_input, expected_return",
[
pytest.param(
{"property": "value"},
("property", "value", "eq"),
id="No operator specified (string)",
),
pytest.param(
{"property": False},
("property", False, "eq"),
id="No operator specified (bool)",
),
pytest.param(
{"property": 5},
("property", 5, "eq"),
id="No operator specified (int)",
),
pytest.param(
{"property": {"eq": "value"}},
("property", "value", "eq"),
id="Specific operator given in input (eq)",
),
pytest.param(
{"property": {"neq": "value"}},
("property", "value", "neq"),
id="Specific operator given in input (neq)",
),
pytest.param(
{"property": {"gt": "value"}},
("property", "value", "gt"),
id="Specific operator given in input (gt)",
),
pytest.param(
{"isPublic": True},
("isPublic", True, "eq"),
id="No operator specified using isPublic",
),
pytest.param(
{"isPublic": {"eq": False}},
("isPublic", False, "eq"),
id="Specific operator using isPublic (eq)",
),
pytest.param(
{"isPublic": {"neq": True}},
("isPublic", True, "neq"),
id="Specific operator using isPublic (neq)",
),
],
)
def test_valid_get_condition_values(self, filter_input, expected_return):
test_condition_values = SearchAPIQueryFilterFactory.get_condition_values(
filter_input,
)
assert test_condition_values == expected_return
@pytest.mark.parametrize(
"filter_input",
[
pytest.param({"isPublic": {"lt": True}}, id="isPublic invalid operator"),
pytest.param(
{"name": {"gt": False}}, id="Invalid operator on boolean value",
),
],
)
def test_invalid_get_condition_values(self, filter_input):
with pytest.raises(FilterError):
SearchAPIQueryFilterFactory.get_condition_values(filter_input)
@pytest.mark.parametrize(
"test_filter, entity_name, expected_field_name",
[
pytest.param(
SearchAPIWhereFilter("name", "test name", "eq"),
"File",
["File.name"],
id="Single where filter",
),
pytest.param(
[
SearchAPIWhereFilter("name", "test name", "eq"),
SearchAPIWhereFilter("id", 3, "eq"),
],
"File",
["File.name", "File.id"],
id="List of where filters",
),
pytest.param(
NestedWhereFilters(
[SearchAPIWhereFilter("name", "test name", "eq")],
[SearchAPIWhereFilter("id", 3, "eq")],
"OR",
SearchAPIQuery("File"),
),
"File",
["File.name", "File.id"],
id="NestedWhereFilters object",
),
],
)
def test_prefix_entity_name(self, test_filter, entity_name, expected_field_name):
SearchAPIQueryFilterFactory.prefix_where_filter_field_with_entity_name(
test_filter, entity_name,
)
if not isinstance(test_filter, list):
test_filter = [test_filter]
for filter_, field_name in zip(test_filter, expected_field_name):
if isinstance(filter_, NestedWhereFilters):
assert filter_.lhs[0].field == expected_field_name[0]
assert filter_.rhs[0].field == expected_field_name[1]
else:
assert filter_.field == field_name
# assert test_filter.field == expected_output
| 38.866848
| 88
| 0.31364
| 4,420
| 85,818
| 5.947964
| 0.036878
| 0.037657
| 0.047851
| 0.039559
| 0.876075
| 0.836288
| 0.806771
| 0.780791
| 0.760631
| 0.732446
| 0
| 0.005187
| 0.579878
| 85,818
| 2,207
| 89
| 38.884459
| 0.723997
| 0.000501
| 0
| 0.688653
| 0
| 0
| 0.158593
| 0.004698
| 0
| 0
| 0
| 0
| 0.027675
| 1
| 0.008303
| false
| 0
| 0.002768
| 0
| 0.011531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5a53f80497feec5b4ae7f1b14e869cf954a8099c
| 4,085
|
py
|
Python
|
maps/castle.py
|
dgottlieb/storyteller
|
0cf430b1372a5aed95d08ebe1aff268314f362e1
|
[
"WTFPL"
] | 3
|
2015-01-29T05:24:09.000Z
|
2016-11-01T05:21:27.000Z
|
maps/castle.py
|
dgottlieb/storyteller
|
0cf430b1372a5aed95d08ebe1aff268314f362e1
|
[
"WTFPL"
] | null | null | null |
maps/castle.py
|
dgottlieb/storyteller
|
0cf430b1372a5aed95d08ebe1aff268314f362e1
|
[
"WTFPL"
] | null | null | null |
castle = [
"..W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.",
"..W.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.T.T.T.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.T.K0T.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.N0B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.M0B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..E0E0B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..E0E0B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..E0E0B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..E0E0B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..E0E0B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.B.S.W.",
"..W.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.S.W.",
"..W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W.W."]
import pygame
import chars
import items
import npc_mod
import tiles
import tiled_screen
import world
import zone
class Castle(zone.Zone):
def __init__(self):
zone.Zone.__init__(self)
self.map = self.parse_map(castle)
self.music_file = 'sounds/dw1castle.mid'
def parse_tile(self, tile_str, row, column):
if tile_str == 'E0':
return {'tile': None,
'exit': (lambda: world.World(), ('C0', 0), tiled_screen.DOWN)}
if tile_str == 'N0':
walk_path = {(row, column): ((1, 0), 2),
(row + 1, column): ((0, 1), 2),
(row + 1, column + 1): ((-1, 0), 2),
(row, column + 1): ((0, -1), 0)}
knight = npc_mod.NPC(chars.get_knight(), row, column, walk_path)
return {'tile': tiles.brick_tile,
'npc': knight}
if tile_str == 'K0':
walk_path = {}
king = npc_mod.NPC(chars.get_king(), row, column, walk_path)
king.set_dialogue([["I am the King."], ["My life is awesome."]])
return {'tile': tiles.brick_tile,
'npc': king}
if tile_str == 'M0':
walk_path = {}
merchant = npc_mod.Merchant(chars.get_merchant(), row, column, walk_path,
[items.stick])
merchant.set_greeting([["What would you like to do?"]])
return {'tile': tiles.brick_tile,
'npc': merchant}
def special_actions(self, tile):
pass
| 52.371795
| 91
| 0.484211
| 1,338
| 4,085
| 1.450673
| 0.056801
| 0.880989
| 1.279753
| 1.658939
| 0.658423
| 0.640907
| 0.599176
| 0.599176
| 0.599176
| 0.599176
| 0
| 0.010327
| 0.146634
| 4,085
| 77
| 92
| 53.051948
| 0.546472
| 0
| 0
| 0.382353
| 0
| 0.397059
| 0.610526
| 0.58164
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044118
| false
| 0.014706
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5a72fd1147495705ec88c1da3b593f3c48780061
| 258,348
|
py
|
Python
|
MEMOCODE_2018_Benchmarks/RABBIT_and_wolf_game/Python/test.py
|
PRETgroup/sann
|
ae94ede80570f0d27f2f9c658dda7661c72c3f4a
|
[
"BSD-2-Clause"
] | null | null | null |
MEMOCODE_2018_Benchmarks/RABBIT_and_wolf_game/Python/test.py
|
PRETgroup/sann
|
ae94ede80570f0d27f2f9c658dda7661c72c3f4a
|
[
"BSD-2-Clause"
] | null | null | null |
MEMOCODE_2018_Benchmarks/RABBIT_and_wolf_game/Python/test.py
|
PRETgroup/sann
|
ae94ede80570f0d27f2f9c658dda7661c72c3f4a
|
[
"BSD-2-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
from ANN import *
from Game import *
from GA import *
from Async_Game import *
from Async_Game_2 import *
# ------------ TEST RUNS --------------
# TEST FOR DETERMINISM
# ASYNC 2 RUN
# start = timer()
new_state = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 0, 0, 0, 0, 4, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 2, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 5, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 3, 0, 0, 0, 0, 0, 1],
[1, 0, 2, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
#
# new_state2 = [
# [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1],
# [1, 2, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
# [1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# ]
# game = AGame2(size=20, rounds=100, turns=50, num_exits=4)
#
# count = 0
# count_wolf = 0
# count_rabbit = 0
# for x in range(1000):
# print "GAME", x
# game.is_custom_state = False
# state = game.reset()
# new_state = [[0 for _ in range(len(state))] for _ in range(len(state))]
# for i in range(len(state)):
# for j in range(len(state)):
# new_state[i][j] = state[i][j]
#
# game.set_state(new_state)
# game.import_from_file("ANN_22_03_0\Attempt_2_100_50.txt", 98)
# game.start(pause=False)
# if not (game.score[0] % 100 == 0) or not (game.score[1] % 100 == 0):
# count = count + 1
# print "(Odd score, ",
# if game.score[0] == 0:
# print "Rabbit)",
# count_rabbit = count_rabbit + 1
# elif game.score[1] == 0:
# print "Wolf)",
# count_wolf = count_wolf + 1
# elif np.abs(game.score[0]) < np.abs(game.score[1]):
# print "Wolf)",
# count_wolf = count_wolf + 1
# elif np.abs(game.score[1]) < np.abs(game.score[0]):
# print "Rabbit)",
# count_rabbit = count_rabbit + 1
# print "Final score:", game.score
# print "Number of non-deterministic scores:", count
# print "Caused by wolves:", count_wolf
# print "Caused by rabbit:", count_rabbit
#
# end = timer()
# print "Time taken:", (end - start)
# -------- END ASYNC 2 ---------
# --------- END DETERMINISM TEST -------------
# TEST FOR TIMING
# round_average = []
# rounds = []
# for i in range(50):
# round_average.append(0)
# rounds.append([])
#
# print "(Rounds ->", i, "):",
# for x in range(100):
# start = timer()
# game = AGame2(size=10, rounds=i, turns=50, num_exits=3)
# game.import_from_file("ANN_22_03_0\Attempt_2_100_50.txt", 98)
# game.start(pause=False)
#
# end = timer()
# rounds[i].append(end - start)
# round_average[i] = round_average[i] + (rounds[i][x])
#
# print rounds[i]
# round_average[i] = round_average[i] / (x + 1)
# print "Round averages:", round_average
#
# # GRAPH
# plt.figure()
# x = np.arange(0, len(round_average))
# y = round_average
# line1, = plt.plot(x, y, label="Average")
# plt.xlabel("Number of rounds")
# plt.ylabel("Average time taken")
# plt.title("Average time taken to run the game in relation to the number of rounds the game is run")
# plt.legend(handles=[line1])
# name = 'Asynchronous_Tests/TimingTestAverage4.png'
# plt.savefig(name)
# plt.show()
# ---------------- END TIMING TEST ----------------
# GAME RUN
start = timer()
game = Game(size=10, rounds=1, turns=10, num_exits=3)
# for i in range(5):
# name1 = "ANN_26_03_0\Attempt_"
# name2 = "_100_50.txt"
# name = name1 + str(i) + name2
# game.evaluate_file(100, name, debug=True)
#
# end = timer()
# print "Time taken:", (end - start)
game.set_state(new_state)
game.import_from_file("ANN_22_03_0\Attempt_2_100_50.txt", 98)
# w_inputs = [0.35355339059327373, 0.875, 0.24253562503633297, 0.46101043481131532, 0.25, 0, 0.17677669529663687, 0,
# 0.25, 0, 0.17677669529663687, 0.23570226039551581, 0.20000000000000001, 0, 0.1414213562373095,
# 0.17677669529663687, 0.20000000000000001, 0, 0.17677669529663687, 0.23570226039551581]
#
# w_inputs2 = [0, 0, 0, 0, 0.25, 0, 0.176776695297, 0, 0.2, 0, 0.141421356237, 0, 0.2, 0, 0.176776695297, 0, 0.25, 0, 0.176776695297, 0]
#
# print game.wolf1.ann.run(w_inputs)
# print game.wolf1.ann.run(w_inputs2)
# print game.rabbit.ann.weights[0]
# print game.rabbit.ann.weights[1]
# print game.wolf1.ann.weights[0]
# print game.wolf1.ann.weights[1]
# print game.wolf2.ann.weights[0]
# print game.wolf2.ann.weights[1]
# game.start(pause=True)
# in each instance, generate random positions for each animal, then run inputs for wolf1 and produce outputs to match
# print "float input_set[1000][20] = {"
# set_outputs = []
# for i in range(1000):
# # determine random positions for all animals
# x = rand.randint(1, game.size - 2)
# y = rand.randint(1, game.size - 2)
# while [x, y] in game.exits:
# x = rand.randint(1, game.size - 2)
# game.wolf1.pos = [x, y] # ANN initialised in animal __init__
#
# x = rand.randint(1, game.size - 2)
# y = rand.randint(1, game.size - 2)
# while [x, y] == game.wolf1.pos or [x, y] in game.exits:
# x = rand.randint(1, game.size - 2)
# game.wolf2.pos = [x, y]
#
# x = rand.randint(1, game.size - 2)
# y = rand.randint(1, game.size - 2)
# while [x, y] == game.wolf1.pos or [x, y] == game.wolf2.pos or [x, y] in game.exits:
# y = rand.randint(1, game.size - 2)
# game.rabbit.pos = [x, y]
#
# set_inputs = game.get_inputs(game.wolf1.pos)
#
# print "{",
# for j in range(19):
# print set_inputs[j], ",",
# print set_inputs[19], "},"
#
# # print set_inputs
# set_outputs.append(game.wolf1.ann.run(set_inputs))
# print "};"
# print set_outputs
#
# end = timer()
# print "Time taken:", (end - start)
output_set1 = [[0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.9204991475269404, 0.997820405498567, 0.004755920056928505, 0.00027945528545256926, 0.31836246282515795, 0.5098160564019794, 0.0638457240331752, 0.5113664414372071], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.9204991475269404, 0.997820405498567, 0.004755920056928505, 0.00027945528545256926, 0.31836246282515795, 0.5098160564019794, 0.0638457240331752, 0.5113664414372071], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.8712277660710959, 0.9998792147767195, 0.009233476741708865, 0.006651887470136875, 0.2846353876132319, 0.8606757345444841, 0.905799724846877, 0.10941869528527111], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.9394784796966786, 0.9997140044878048, 0.005478297748100236, 0.0008731998901328106, 0.2989775965870789, 0.7627778793761508, 0.20634254240689162, 0.21503350720998343], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.9394784796966786, 0.9997140044878048, 0.005478297748100236, 0.0008731998901328106, 0.2989775965870789, 0.7627778793761508, 0.20634254240689162, 0.21503350720998343], [0.9940756574013799, 0.999851937423747, 2.653066991749665e-05, 0.006737893689430976, 0.9067979203601093, 0.9986603148308576, 0.14473133131990562, 0.05638620510711369], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.9204991475269404, 0.997820405498567, 0.004755920056928505, 0.00027945528545256926, 0.31836246282515795, 0.5098160564019794, 0.0638457240331752, 0.5113664414372071], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.9940756574013799, 0.999851937423747, 2.653066991749665e-05, 0.006737893689430976, 0.9067979203601093, 0.9986603148308576, 0.14473133131990562, 0.05638620510711369], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.9394784796966786, 0.9997140044878048, 0.005478297748100236, 0.0008731998901328106, 0.2989775965870789, 0.7627778793761508, 0.20634254240689162, 0.21503350720998343], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9940756574013799, 0.999851937423747, 2.653066991749665e-05, 0.006737893689430976, 0.9067979203601093, 0.9986603148308576, 0.14473133131990562, 0.05638620510711369], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.9820414006235906, 0.9999135464442158, 0.000251464690814552, 0.0067050602497352035, 0.8462159835954384, 0.9919592161371642, 0.6597365357308984, 0.019650064647707154], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.9492409733663436, 0.9998720501202061, 0.00026265005707428097, 0.014216156794433854, 0.7253270042206533, 0.9930368078663476, 0.835602039566071, 0.052035705737287156], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.9492409733663436, 0.9998720501202061, 0.00026265005707428097, 0.014216156794433854, 0.7253270042206533, 0.9930368078663476, 0.835602039566071, 0.052035705737287156], [0.9492409733663436, 0.9998720501202061, 0.00026265005707428097, 0.014216156794433854, 0.7253270042206533, 0.9930368078663476, 0.835602039566071, 0.052035705737287156], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.8712277660710959, 0.9998792147767195, 0.009233476741708865, 0.006651887470136875, 0.2846353876132319, 0.8606757345444841, 0.905799724846877, 0.10941869528527111], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.9204991475269404, 0.997820405498567, 0.004755920056928505, 0.00027945528545256926, 0.31836246282515795, 0.5098160564019794, 0.0638457240331752, 0.5113664414372071], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.8712277660710959, 0.9998792147767195, 0.009233476741708865, 0.006651887470136875, 0.2846353876132319, 0.8606757345444841, 0.905799724846877, 0.10941869528527111], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.9820414006235906, 0.9999135464442158, 0.000251464690814552, 0.0067050602497352035, 0.8462159835954384, 0.9919592161371642, 0.6597365357308984, 0.019650064647707154], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.9394784796966786, 0.9997140044878048, 0.005478297748100236, 0.0008731998901328106, 0.2989775965870789, 0.7627778793761508, 0.20634254240689162, 0.21503350720998343], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.8712277660710959, 0.9998792147767195, 0.009233476741708865, 0.006651887470136875, 0.2846353876132319, 0.8606757345444841, 0.905799724846877, 0.10941869528527111], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.9940756574013799, 0.999851937423747, 2.653066991749665e-05, 0.006737893689430976, 0.9067979203601093, 0.9986603148308576, 0.14473133131990562, 0.05638620510711369], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.8712277660710959, 0.9998792147767195, 0.009233476741708865, 0.006651887470136875, 0.2846353876132319, 0.8606757345444841, 0.905799724846877, 0.10941869528527111], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.8712277660710959, 0.9998792147767195, 0.009233476741708865, 0.006651887470136875, 0.2846353876132319, 0.8606757345444841, 0.905799724846877, 0.10941869528527111], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.9940756574013799, 0.999851937423747, 2.653066991749665e-05, 0.006737893689430976, 0.9067979203601093, 0.9986603148308576, 0.14473133131990562, 0.05638620510711369], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.9394784796966786, 0.9997140044878048, 0.005478297748100236, 0.0008731998901328106, 0.2989775965870789, 0.7627778793761508, 0.20634254240689162, 0.21503350720998343], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.9204991475269404, 0.997820405498567, 0.004755920056928505, 0.00027945528545256926, 0.31836246282515795, 0.5098160564019794, 0.0638457240331752, 0.5113664414372071], [0.9394784796966786, 0.9997140044878048, 0.005478297748100236, 0.0008731998901328106, 0.2989775965870789, 0.7627778793761508, 0.20634254240689162, 0.21503350720998343], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.9492409733663436, 0.9998720501202061, 0.00026265005707428097, 0.014216156794433854, 0.7253270042206533, 0.9930368078663476, 0.835602039566071, 0.052035705737287156], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.9204991475269404, 0.997820405498567, 0.004755920056928505, 0.00027945528545256926, 0.31836246282515795, 0.5098160564019794, 0.0638457240331752, 0.5113664414372071], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9820414006235906, 0.9999135464442158, 0.000251464690814552, 0.0067050602497352035, 0.8462159835954384, 0.9919592161371642, 0.6597365357308984, 0.019650064647707154], [0.9940756574013799, 0.999851937423747, 2.653066991749665e-05, 0.006737893689430976, 0.9067979203601093, 0.9986603148308576, 0.14473133131990562, 0.05638620510711369], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.8712277660710959, 0.9998792147767195, 0.009233476741708865, 0.006651887470136875, 0.2846353876132319, 0.8606757345444841, 0.905799724846877, 0.10941869528527111], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.8712277660710959, 0.9998792147767195, 0.009233476741708865, 0.006651887470136875, 0.2846353876132319, 0.8606757345444841, 0.905799724846877, 0.10941869528527111], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9940756574013799, 0.999851937423747, 2.653066991749665e-05, 0.006737893689430976, 0.9067979203601093, 0.9986603148308576, 0.14473133131990562, 0.05638620510711369], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.9204991475269404, 0.997820405498567, 0.004755920056928505, 0.00027945528545256926, 0.31836246282515795, 0.5098160564019794, 0.0638457240331752, 0.5113664414372071], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.9204991475269404, 0.997820405498567, 0.004755920056928505, 0.00027945528545256926, 0.31836246282515795, 0.5098160564019794, 0.0638457240331752, 0.5113664414372071], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.9820414006235906, 0.9999135464442158, 0.000251464690814552, 0.0067050602497352035, 0.8462159835954384, 0.9919592161371642, 0.6597365357308984, 0.019650064647707154], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.9204991475269404, 0.997820405498567, 0.004755920056928505, 0.00027945528545256926, 0.31836246282515795, 0.5098160564019794, 0.0638457240331752, 0.5113664414372071], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.8712277660710959, 0.9998792147767195, 0.009233476741708865, 0.006651887470136875, 0.2846353876132319, 0.8606757345444841, 0.905799724846877, 0.10941869528527111], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.8712277660710959, 0.9998792147767195, 0.009233476741708865, 0.006651887470136875, 0.2846353876132319, 0.8606757345444841, 0.905799724846877, 0.10941869528527111], [0.9492409733663436, 0.9998720501202061, 0.00026265005707428097, 0.014216156794433854, 0.7253270042206533, 0.9930368078663476, 0.835602039566071, 0.052035705737287156], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.974298216577679, 0.9987821181807213, 0.004425694115241892, 0.000304187728708465, 0.9895635230597039, 0.5532842514531658, 0.9918324219714489, 0.012952455538034254], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9394784796966786, 0.9997140044878048, 0.005478297748100236, 0.0008731998901328106, 0.2989775965870789, 0.7627778793761508, 0.20634254240689162, 0.21503350720998343], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9492409733663436, 0.9998720501202061, 0.00026265005707428097, 0.014216156794433854, 0.7253270042206533, 0.9930368078663476, 0.835602039566071, 0.052035705737287156], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.9696309451921452, 0.9999996719273639, 0.00021927544176994542, 0.004065877251051302, 0.6149022306729102, 0.9999633304112948, 0.001980127323861929, 0.98920505416126], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.34611195464250893, 0.9993957670342325, 0.00801798220359197, 0.005710054448791813, 0.2542505785607144, 0.9526348574950757, 0.9984054781924007, 0.2510543692146448], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.9906780299956643, 0.9998534146447304, 0.00043349919664669275, 0.0021346315842544383, 0.9027977918684693, 0.9787792894670917, 0.5383102021369034, 0.020985795792433527], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.975712869813101, 0.9999001373682023, 0.0016433498121412875, 0.00232889889758488, 0.6572861064040806, 0.9605841545972731, 0.4331787937043679, 0.039429061212534595], [0.7012293591097228, 0.8327430302146692, 0.013966589317056095, 0.0005612774655919669, 0.7105106847585556, 0.005866919280500998, 0.28244701408605766, 0.4480142542360306], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.619403035234355, 0.9991453426023565, 0.0021880819591869292, 0.0003606900682726335, 0.9887330224692878, 0.4438365944578517, 0.12135225532662065, 0.06295738892599204], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.9507592551658328, 0.9999578798765617, 0.003038334154758543, 0.004478067472099255, 0.6414350714982088, 0.9448634833072753, 0.7478135722602877, 0.02081104610807196], [0.9706902493828689, 0.9999014348161821, 0.0002482976630330183, 0.009843343882128703, 0.7945228378810836, 0.9932700052886259, 0.7383940887955424, 0.030195780887904554], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.6934857767802269, 0.7838636051546535, 0.00978088179529716, 0.000589466785014496, 0.7856126934390497, 0.005601083146058356, 0.36953863915183255, 0.3925057375012165], [0.9492409733663436, 0.9998720501202061, 0.00026265005707428097, 0.014216156794433854, 0.7253270042206533, 0.9930368078663476, 0.835602039566071, 0.052035705737287156], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9204991475269404, 0.997820405498567, 0.004755920056928505, 0.00027945528545256926, 0.31836246282515795, 0.5098160564019794, 0.0638457240331752, 0.5113664414372071], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.9394784796966786, 0.9997140044878048, 0.005478297748100236, 0.0008731998901328106, 0.2989775965870789, 0.7627778793761508, 0.20634254240689162, 0.21503350720998343], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.9704622089521452, 0.999980134149838, 8.297904208201591e-05, 0.021668768903126457, 0.7479681291686885, 0.9998856731076894, 0.08336781247330446, 0.4915633904018265], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.31246477046969495, 0.9922885565366311, 0.010622950850010143, 0.012353719030255849, 0.5544625807993329, 0.9562633327178526, 0.9981487629810418, 0.8235470252929691], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.9740046107415464, 0.9999348677511988, 0.0011280131366589966, 0.003142384557448156, 0.8576418924567042, 0.9545333286657802, 0.7904262178559613, 0.014075106739490166], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.9924837778486827, 0.9989268643102444, 0.0012191648870903842, 0.0002908986915298129, 0.9926994542722708, 0.6874496494738243, 0.9839911840642187, 0.01377555881949025], [0.9226443244561893, 0.9999047545515034, 0.003817856913325416, 0.0026048172484790626, 0.8323676248245497, 0.8508602288493949, 0.8736538657332169, 0.012228440574850534], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.8543488501341918, 0.9994197951979756, 0.09380546888780658, 0.000562194982871237, 0.21553697035502742, 0.06837306353910003, 0.23240834373974836, 0.8140718925798439], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.7075313278895118, 0.9528165764663733, 0.00024028256279746944, 0.0419883730929068, 0.2223082190799704, 0.9918680521573371, 0.9957023140585564, 0.9993871853584136], [0.9128455984341999, 0.9999544829433648, 0.004761634982689357, 0.006700794779618401, 0.3874082764581125, 0.9329089125208433, 0.8465610034125904, 0.0596793947015072], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.43011239510426436, 0.9100478388500701, 0.0015393320922250208, 0.00045918819404481973, 0.9979542901758269, 0.8626589457233201, 0.9932258187487738, 0.024102697425247333], [0.5108280295744728, 0.6702208738872165, 0.01948198718439859, 0.035250853267872216, 0.9754343463046699, 0.9481264874174522, 0.044700329944979335, 0.961185069220871], [0.4967933007934249, 0.9984135924826639, 1.5335792405153333e-05, 0.017272393430616168, 0.1292363646176596, 0.7311752482015244, 0.9811854375511448, 0.8973595259834837], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.8894884311869264, 0.9997973322992432, 0.010176229478222287, 0.003641320387798874, 0.41977764722759664, 0.7149835924672313, 0.8334698236404028, 0.060271472885201315], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.8721615715499735, 0.9999086487036207, 0.0052438766262562575, 0.009269188454813193, 0.30045710434598644, 0.9226049121166215, 0.919673621976729, 0.11699230898834169], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.9492409733663436, 0.9998720501202061, 0.00026265005707428097, 0.014216156794433854, 0.7253270042206533, 0.9930368078663476, 0.835602039566071, 0.052035705737287156], [0.8642116426125306, 0.9999076857072546, 0.0019158998972424609, 0.012520957717797179, 0.35861923963866427, 0.9596018297693715, 0.925928134148489, 0.12797202962078064], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.776129678035095, 0.9970975123726578, 0.03244256871831058, 0.001317435396159217, 0.7391903639056268, 0.23641614853311693, 0.5310891814071078, 0.19653050325165272], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.7876567498967855, 0.994615825699678, 0.0009521703700097382, 0.027907783035342672, 0.17821642193876197, 0.978884650851592, 0.9847004284184796, 0.9882120357135643], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.8963303045745026, 0.9998277776778397, 0.007547668016307629, 0.0036560828757363512, 0.5541942035582119, 0.7532551118778735, 0.8052728188364573, 0.03582774370443423], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.9569978143865792, 0.9999502994171116, 0.001647766104709314, 0.006770414812663102, 0.6078947163963356, 0.9739969468541787, 0.7712208386517876, 0.03224650722349818], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.7372257136693634, 0.938567604336992, 0.026004663732226856, 0.0005686676195921547, 0.5809664580676194, 0.007321166466226545, 0.2272096553269907, 0.4790376579682714], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.978445677272435, 0.9999471521815015, 4.866370729184509e-05, 0.019170009915811623, 0.833206865891295, 0.9997491229497221, 0.14325675077911612, 0.17909391239983039], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.5749599830101485, 0.9687977761988059, 0.0023813640235534695, 0.1541842525112368, 0.7412670211815588, 0.8581884113393773, 0.9810544594619939, 0.9945756272130561], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.6961927609998032, 0.7995011681043653, 0.007478646672682654, 0.0006575746380115004, 0.8535660912449907, 0.005791708319848361, 0.5084921008279353, 0.29734664554467366], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.9872672484915054, 0.9999085260266926, 3.251124892882648e-05, 0.012813790423038318, 0.8739435201397908, 0.9994862817595647, 0.14787339150433546, 0.0903285883262022], [0.19891982321443755, 0.9727865079910895, 0.010048620411476908, 0.008905283646381315, 0.7093216163170458, 0.9326607110517935, 0.998707935866274, 0.8442503392330533], [0.7375870099958431, 0.9988539903041164, 0.017913147226297266, 0.0009969104511245105, 0.9488836565731239, 0.43193280419975943, 0.2515041542632707, 0.0911860939450549], [0.17102823074178647, 0.921672489899098, 8.137296990963463e-05, 0.0003217228911196817, 0.9675451799796912, 0.2549105616069, 0.0359009891469465, 0.8953767059536982], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.9491101867098894, 0.9983663541388137, 0.009796995267325935, 0.00029265245163625337, 0.9888247829552145, 0.501870738402459, 0.9936141020286289, 0.014508537600457536], [0.9176992477549741, 0.9999502187446484, 0.005854050371509827, 0.004812912354420178, 0.49409953641841264, 0.8995651182781346, 0.7887871894764301, 0.03374115274180967], [0.9990270594681151, 0.9992024376726627, 0.0002809389512086013, 0.00035429436437890255, 0.9949741809459853, 0.9370486510831442, 0.8206930684307541, 0.03373561372545756], [0.9518440667034287, 0.9999322475460639, 0.0023663519563026955, 0.0027835929603728534, 0.8298605462104053, 0.9117615750561967, 0.8406020560342966, 0.013539684829712989], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.04848322956586269, 0.6519622002511831, 0.003934940622089357, 0.007727537091957959, 0.925691919989501, 0.9008818405453288, 0.9996227820171267, 0.8388979769520879], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.3496790238264589, 0.9951780835506667, 0.007652476325125691, 0.016407282129121008, 0.4781565714146621, 0.9680365649603411, 0.9980740663951763, 0.8146739775981933], [0.9545750084640168, 0.8010221013459997, 0.0010768383774790136, 4.5285242317457465e-05, 0.7122118355741406, 0.15925456350948017, 0.01931318272553877, 0.9275966551817966], [0.04141089323783879, 0.9910551102522079, 1.654425071079162e-05, 0.015903012386923807, 0.9188840210179037, 0.9607844596952501, 0.988456321290572, 0.9236698877001335], [0.8823009792529095, 0.5060423110732729, 3.0857599388026564e-05, 0.015419420348934627, 0.9922375740571194, 0.6075090773322849, 0.8615946862392778, 0.7323110858792001], [0.9369072306312761, 0.9993878939740114, 0.0008856313203485793, 0.013033162404695884, 0.6514076875004533, 0.6233886085267041, 0.980799938303876, 0.03609248827943213], [0.7817661038946951, 0.9982740564226691, 0.003030285286542697, 0.018728792628455553, 0.18913890054838953, 0.9589742499609987, 0.9773475436545488, 0.9249790728009202], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768], [0.7800979980482294, 0.9988597906888388, 0.005716551291513632, 0.012103533051242147, 0.21389128606870436, 0.9187753534003414, 0.9701925693068498, 0.8036650127034269], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.9940756574013799, 0.999851937423747, 2.653066991749665e-05, 0.006737893689430976, 0.9067979203601093, 0.9986603148308576, 0.14473133131990562, 0.05638620510711369], [0.8849374301089054, 0.9998694186934906, 0.005269414155628153, 0.003970307908163266, 0.7044132096685062, 0.8262784437953502, 0.7945511788980267, 0.020190231346340615], [0.7664603779054239, 0.9978133594063298, 0.030699500602800852, 0.0014420041420162499, 0.5525253184104642, 0.2475590149715773, 0.6243340601317057, 0.3072161596577674], [0.9940756574013799, 0.999851937423747, 2.653066991749665e-05, 0.006737893689430976, 0.9067979203601093, 0.9986603148308576, 0.14473133131990562, 0.05638620510711369], [0.7662682447191406, 0.997599928217091, 0.02819312554129104, 0.0012060215904267162, 0.8686257327720966, 0.2924863164620805, 0.40712367546162925, 0.13521557682265065], [0.7751657839532856, 0.9987162077641905, 0.007579532553743829, 0.006477469560615031, 0.25559576927242644, 0.8101956877099769, 0.9531899363826354, 0.6827158363899256], [0.6358328845680686, 0.7914864077399559, 0.0006694207063825217, 0.00201114097386322, 0.9886297709896402, 0.5614351456589722, 0.9640617323854042, 0.8173319747752941], [0.8061349450965536, 0.999767150024817, 0.005080980536513046, 0.01040826816049117, 0.3409665942145532, 0.9097666313819003, 0.9870252543663042, 0.08656560010172533], [0.9135271257100342, 0.996591601811556, 0.014580504042979716, 0.0002899709156167404, 0.9912420642891241, 0.5609059388773838, 0.9936845485989105, 0.02069013167663768]]
output_set2 = [[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.920517, 0.997818, 0.004761, 0.000275, 0.318375, 0.509796, 0.063858, 0.511337],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.920517, 0.997818, 0.004761, 0.000275, 0.318375, 0.509796, 0.063858, 0.511337],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.871216, 0.999878, 0.009232, 0.006653, 0.284637, 0.860703, 0.905807, 0.109421],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.939484, 0.999710, 0.005478, 0.000870, 0.298981, 0.762772, 0.206345, 0.215042],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.939484, 0.999710, 0.005478, 0.000870, 0.298981, 0.762772, 0.206345, 0.215042],[0.994080, 0.999847, -0.000031, 0.006744, 0.906799, 0.998657, 0.144714, 0.056381],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.920517, 0.997818, 0.004761, 0.000275, 0.318375, 0.509796, 0.063858, 0.511337],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.994080, 0.999847, -0.000031, 0.006744, 0.906799, 0.998657, 0.144714, 0.056381],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.939484, 0.999710, 0.005478, 0.000870, 0.298981, 0.762772, 0.206345, 0.215042],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.994080, 0.999847, -0.000031, 0.006744, 0.906799, 0.998657, 0.144714, 0.056381],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.982056, 0.999908, 0.000244, 0.006699, 0.846207, 0.991959, 0.659744, 0.019653],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.949249, 0.999878, 0.000259, 0.014221, 0.725311, 0.993027, 0.835602, 0.052032],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.949249, 0.999878, 0.000259, 0.014221, 0.725311, 0.993027, 0.835602, 0.052032],[0.949249, 0.999878, 0.000259, 0.014221, 0.725311, 0.993027, 0.835602, 0.052032],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.871216, 0.999878, 0.009232, 0.006653, 0.284637, 0.860703, 0.905807, 0.109421],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.920517, 0.997818, 0.004761, 0.000275, 0.318375, 0.509796, 0.063858, 0.511337],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.871216, 0.999878, 0.009232, 0.006653, 0.284637, 0.860703, 0.905807, 0.109421],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.982056, 0.999908, 0.000244, 0.006699, 0.846207, 0.991959, 0.659744, 0.019653],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.939484, 0.999710, 0.005478, 0.000870, 0.298981, 0.762772, 0.206345, 0.215042],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.871216, 0.999878, 0.009232, 0.006653, 0.284637, 0.860703, 0.905807, 0.109421],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.994080, 0.999847, -0.000031, 0.006744, 0.906799, 0.998657, 0.144714, 0.056381],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.871216, 0.999878, 0.009232, 0.006653, 0.284637, 0.860703, 0.905807, 0.109421],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.871216, 0.999878, 0.009232, 0.006653, 0.284637, 0.860703, 0.905807, 0.109421],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.994080, 0.999847, -0.000031, 0.006744, 0.906799, 0.998657, 0.144714, 0.056381],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.939484, 0.999710, 0.005478, 0.000870, 0.298981, 0.762772, 0.206345, 0.215042],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.920517, 0.997818, 0.004761, 0.000275, 0.318375, 0.509796, 0.063858, 0.511337],[0.939484, 0.999710, 0.005478, 0.000870, 0.298981, 0.762772, 0.206345, 0.215042],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.949249, 0.999878, 0.000259, 0.014221, 0.725311, 0.993027, 0.835602, 0.052032],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.920517, 0.997818, 0.004761, 0.000275, 0.318375, 0.509796, 0.063858, 0.511337],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.982056, 0.999908, 0.000244, 0.006699, 0.846207, 0.991959, 0.659744, 0.019653],[0.994080, 0.999847, -0.000031, 0.006744, 0.906799, 0.998657, 0.144714, 0.056381],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.871216, 0.999878, 0.009232, 0.006653, 0.284637, 0.860703, 0.905807, 0.109421],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.871216, 0.999878, 0.009232, 0.006653, 0.284637, 0.860703, 0.905807, 0.109421],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.994080, 0.999847, -0.000031, 0.006744, 0.906799, 0.998657, 0.144714, 0.056381],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.920517, 0.997818, 0.004761, 0.000275, 0.318375, 0.509796, 0.063858, 0.511337],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.920517, 0.997818, 0.004761, 0.000275, 0.318375, 0.509796, 0.063858, 0.511337],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.982056, 0.999908, 0.000244, 0.006699, 0.846207, 0.991959, 0.659744, 0.019653],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.920517, 0.997818, 0.004761, 0.000275, 0.318375, 0.509796, 0.063858, 0.511337],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.871216, 0.999878, 0.009232, 0.006653, 0.284637, 0.860703, 0.905807, 0.109421],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.871216, 0.999878, 0.009232, 0.006653, 0.284637, 0.860703, 0.905807, 0.109421],[0.949249, 0.999878, 0.000259, 0.014221, 0.725311, 0.993027, 0.835602, 0.052032],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.974304, 0.998779, 0.004425, 0.000305, 0.989563, 0.553268, 0.991821, 0.012955],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.939484, 0.999710, 0.005478, 0.000870, 0.298981, 0.762772, 0.206345, 0.215042],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.949249, 0.999878, 0.000259, 0.014221, 0.725311, 0.993027, 0.835602, 0.052032],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.969620, 1.000000, 0.000214, 0.004059, 0.614899, 0.999969, 0.001984, 0.989212],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.346146, 0.999390, 0.008026, 0.005707, 0.254257, 0.952637, 0.998398, 0.251038],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.990677, 0.999847, 0.000427, 0.002136, 0.902786, 0.978775, 0.538300, 0.020981],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.975723, 0.999893, 0.001648, 0.002335, 0.657272, 0.960587, 0.433182, 0.039429],[0.701233, 0.832733, 0.013962, 0.000565, 0.710510, 0.005859, 0.282486, 0.448029],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.619415, 0.999146, 0.002182, 0.000366, 0.988724, 0.443848, 0.121338, 0.062943],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.950745, 0.999954, 0.003036, 0.004471, 0.641434, 0.944855, 0.747833, 0.020813],[0.970688, 0.999908, 0.000244, 0.009842, 0.794510, 0.993271, 0.738373, 0.030197],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.693481, 0.783890, 0.009781, 0.000595, 0.785599, 0.005600, 0.369492, 0.392532],[0.949249, 0.999878, 0.000259, 0.014221, 0.725311, 0.993027, 0.835602, 0.052032],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.920517, 0.997818, 0.004761, 0.000275, 0.318375, 0.509796, 0.063858, 0.511337],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.939484, 0.999710, 0.005478, 0.000870, 0.298981, 0.762772, 0.206345, 0.215042],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.970474, 1.000061, 0.000076, 0.021667, 0.747971, 0.999893, 0.083359, 0.491547],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.312424, 0.992294, 0.010620, 0.012360, 0.554459, 0.956268, 0.998138, 0.823547],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.973999, 0.999939, 0.001129, 0.003143, 0.857651, 0.954544, 0.790390, 0.014084],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.992493, 0.998932, 0.001221, 0.000290, 0.992706, 0.687454, 0.983994, 0.013779],[0.922638, 0.999908, 0.003815, 0.002609, 0.832367, 0.850876, 0.873657, 0.012222],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.854340, 0.999420, 0.093811, 0.000565, 0.215530, 0.068375, 0.232391, 0.814072],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.707520, 0.952835, 0.000244, 0.041992, 0.222290, 0.991867, 0.995697, 0.999390],[0.912857, 0.999954, 0.004761, 0.006699, 0.387405, 0.932907, 0.846558, 0.059677],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.430161, 0.910034, 0.001541, 0.000458, 0.997955, 0.862640, 0.993225, 0.024094],[0.510880, 0.670258, 0.019485, 0.035248, 0.975449, 0.948120, 0.044708, 0.961197],[0.496780, 0.998413, -0.000031, 0.017273, 0.129242, 0.731201, 0.981186, 0.897369],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.889496, 0.999802, 0.010178, 0.003647, 0.419754, 0.714966, 0.833466, 0.060272],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.872177, 0.999908, 0.005249, 0.009277, 0.300461, 0.922607, 0.919662, 0.116989],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.949249, 0.999878, 0.000259, 0.014221, 0.725311, 0.993027, 0.835602, 0.052032],[0.864212, 0.999908, 0.001923, 0.012527, 0.358627, 0.959610, 0.925919, 0.127960],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.776123, 0.997101, 0.032440, 0.001312, 0.739197, 0.236404, 0.531082, 0.196533],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.787643, 0.994614, 0.000946, 0.027908, 0.178192, 0.978897, 0.984711, 0.988205],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.896347, 0.999832, 0.007553, 0.003662, 0.554230, 0.753265, 0.805237, 0.035828],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.957001, 0.999954, 0.001648, 0.006775, 0.607941, 0.973999, 0.771210, 0.032242],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.737244, 0.938568, 0.026016, 0.000565, 0.580963, 0.007324, 0.227219, 0.479019],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.978439, 0.999954, 0.000046, 0.019180, 0.833206, 0.999756, 0.143265, 0.179077],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.574905, 0.968796, 0.002380, 0.154190, 0.741287, 0.858200, 0.981064, 0.994583],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.696182, 0.799484, 0.007477, 0.000656, 0.853561, 0.005798, 0.508499, 0.297363],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.987274, 0.999908, 0.000031, 0.012817, 0.873932, 0.999481, 0.147873, 0.090332],[0.199219, 0.972794, 0.010056, 0.008896, 0.709335, 0.932587, 0.998703, 0.844238],[0.737579, 0.998856, 0.017914, 0.000992, 0.948883, 0.431915, 0.251511, 0.091187],[0.171021, 0.921677, 0.000076, 0.000320, 0.967545, 0.254883, 0.035904, 0.895370],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.949066, 0.998367, 0.009796, 0.000290, 0.988815, 0.501862, 0.993622, 0.014511],[0.917694, 0.999954, 0.005859, 0.004807, 0.494080, 0.899567, 0.788788, 0.033737],[0.999023, 0.999207, 0.000275, 0.000351, 0.994980, 0.937042, 0.820694, 0.033722],[0.951859, 0.999939, 0.002365, 0.002777, 0.829865, 0.911774, 0.840591, 0.013535],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.048477, 0.651978, 0.003937, 0.007721, 0.925690, 0.900894, 0.999619, 0.838852],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.349655, 0.995178, 0.007660, 0.016403, 0.478165, 0.968033, 0.998077, 0.814682],[0.954575, 0.801010, 0.001083, 0.000046, 0.712189, 0.159256, 0.019318, 0.927597],[0.041412, 0.991043, -0.000031, 0.015900, 0.918884, 0.960785, 0.988464, 0.923676],[0.882355, 0.506104, 0.000031, 0.015427, 0.992233, 0.607513, 0.861603, 0.732346],[0.936905, 0.999390, 0.000885, 0.013031, 0.651382, 0.623383, 0.980804, 0.036102],[0.781769, 0.998276, 0.003036, 0.018723, 0.189133, 0.958984, 0.977341, 0.924988],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691],[0.780090, 0.998856, 0.005722, 0.012100, 0.213867, 0.918777, 0.970200, 0.803696],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.994080, 0.999847, -0.000031, 0.006744, 0.906799, 0.998657, 0.144714, 0.056381],[0.884933, 0.999863, 0.005264, 0.003967, 0.704391, 0.826279, 0.794556, 0.020187],[0.766479, 0.997803, 0.030701, 0.001450, 0.552551, 0.247543, 0.624329, 0.307205],[0.994080, 0.999847, -0.000031, 0.006744, 0.906799, 0.998657, 0.144714, 0.056381],[0.766235, 0.997589, 0.028198, 0.001205, 0.868637, 0.292511, 0.407135, 0.135208],[0.775177, 0.998718, 0.007584, 0.006485, 0.255585, 0.810181, 0.953186, 0.682709],[0.635834, 0.791473, 0.000671, 0.002014, 0.988632, 0.561447, 0.964066, 0.817322],[0.806107, 0.999771, 0.005081, 0.010406, 0.340973, 0.909775, 0.987030, 0.086563],[0.913528, 0.996597, 0.014587, 0.000290, 0.991241, 0.560883, 0.993698, 0.020691]]
dif_sum = 0
max_error = 0
min_error = 1
for i in range(1000):
index_error = 0
for j in range(8):
index_error = index_error + np.abs(output_set1[i][j] - output_set2[i][j])
index_error = index_error/8.0
if index_error > max_error:
max_error = index_error
if index_error < min_error:
min_error = index_error
dif_sum = dif_sum + index_error
dif_sum = dif_sum/1000.0
print "Average difference:", dif_sum
print "Greatest difference:", max_error
print "Smallest difference:", min_error
print "Correlation:", (1 - dif_sum) * 100
print "Worst correlation:", (1 - max_error) * 100
print "Best correlation:", (1 - min_error) * 100
# -------- END GAME ---------
# GA RUN
# ga = GA(generations=100, size=50, game_rounds=50, game_exits=4, game_move=0, filename="attempt")
# start = timer()
#
# times = 5
# for i in range(0, times):
# print "<=====================> Starting ANN set number:", i, "<=====================>"
# ga.set_filename("ANN_26_03_0\Attempt_" + str(i))
# ga.gen_pop()
# ga.set_animals([20, 35, 8], [20, 35, 8], [20, 30, 4])
# ga.optimize()
#
# end = timer()
# print "Set time taken:", (end - start), "s"
#
# end = timer()
# print "Total time taken:", (end - start), "s"
# -------- END GA ---------
# ASYNC 1 RUN
# start = timer()
#
# game = AGame(size=10, rounds=1000, turns=10, num_exits=3)
# # for i in range(5):
# # name1 = "ANN_26_03_0\Attempt_"
# # name2 = "_100_50.txt"
# # name = name1 + str(i) + name2
# # game.evaluate_file(100, name, debug=True, output_name='scoring_async')
# #
# # end = timer()
# # print "Time taken:", (end - start)
# game.import_from_file("ANN_26_03_0\Attempt_2_100_50.txt", 95)
# game.start(pause=False)
# -------- END ASYNC 1 ---------
# ASYNC 2 RUN
# start = timer()
#
# game = AGame2(size=10, rounds=10000, turns=10, num_exits=3)
# # for i in range(5):
# # name1 = "ANN_26_03_0\Attempt_"
# # name2 = "_100_50.txt"
# # name = name1 + str(i) + name2
# # game.evaluate_file(100, name, debug=True, output_name='scoring_async_2')
# #
# # end = timer()
# # print "Time taken:", (end - start)
# game.import_from_file("ANN_22_03_0\Attempt_2_100_50.txt", 98)
# game.start(pause=False)
# -------- END ASYNC 2 ---------
| 932.66426
| 167,700
| 0.784279
| 33,786
| 258,348
| 5.992127
| 0.033387
| 0.003556
| 0.004638
| 0.005552
| 0.988382
| 0.986624
| 0.985157
| 0.983966
| 0.983403
| 0.983398
| 0
| 0.82441
| 0.067928
| 258,348
| 276
| 167,701
| 936.043478
| 0.016333
| 0.029062
| 0
| 0.068182
| 0
| 0
| 0.000551
| 0.000128
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.159091
| null | null | 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
ce467b9933aa51781f7b36708c2f6c1ad3ef72a3
| 154
|
py
|
Python
|
vmtproc/__init__.py
|
PMArkive/py-vmtoffsdump
|
79da83a8e659842249981ad2e052e1e8a9192d64
|
[
"MIT"
] | 1
|
2021-05-16T17:47:07.000Z
|
2021-05-16T17:47:07.000Z
|
vmtproc/__init__.py
|
PMArkive/py-vmtoffsdump
|
79da83a8e659842249981ad2e052e1e8a9192d64
|
[
"MIT"
] | null | null | null |
vmtproc/__init__.py
|
PMArkive/py-vmtoffsdump
|
79da83a8e659842249981ad2e052e1e8a9192d64
|
[
"MIT"
] | 1
|
2021-05-21T15:23:31.000Z
|
2021-05-21T15:23:31.000Z
|
#!/usr/bin/python3
from .dumper import VTableProcessor as VTableProcessor
# backwards compatibility
from .dumper import VTableProcessor as VTableDumper
| 22
| 54
| 0.831169
| 17
| 154
| 7.529412
| 0.647059
| 0.15625
| 0.25
| 0.484375
| 0.515625
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007353
| 0.116883
| 154
| 6
| 55
| 25.666667
| 0.933824
| 0.266234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ce4c8f7a379220d21b1037faa9db1d6461c37e5a
| 39,450
|
py
|
Python
|
learn/model/model.py
|
fuyuanlyu/OptInter
|
95abda78261818e093dabe508d3609806372f2a5
|
[
"Apache-2.0"
] | 1
|
2022-03-15T08:52:09.000Z
|
2022-03-15T08:52:09.000Z
|
learn/model/model.py
|
fuyuanlyu/OptInter
|
95abda78261818e093dabe508d3609806372f2a5
|
[
"Apache-2.0"
] | null | null | null |
learn/model/model.py
|
fuyuanlyu/OptInter
|
95abda78261818e093dabe508d3609806372f2a5
|
[
"Apache-2.0"
] | 1
|
2022-03-22T10:37:31.000Z
|
2022-03-22T10:37:31.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import drop_path
import numpy as np
import sys
class LR(nn.Module):
def __init__(self, cont_field, cate_field, cate_cont_feature,
device=torch.device('cpu'), lamb=0.):
super(LR, self).__init__()
self.cont_field = cont_field
self.cate_field = cate_field
self.cate_cont_feature = cate_cont_feature
self.device = device
self.lamb = lamb
# Create embedding table
self.cate_embeddings_table = \
nn.Embedding(self.cate_cont_feature, 1)
# Initialize
for name, tensor in self.cate_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.cate_field * 1))
nn.init.uniform_(tensor, -a, a)
def forward(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == cates.size()[0]
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# Compute and reshape combined embeddings
X = torch.cat((cont_embedding, cate_embedding), 1).reshape(batch_size, -1)
# LR part
logit = torch.sigmoid(torch.sum(X, dim=1, keepdim=True))
return logit
def l2_penalty(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == (cates.size()[0])
conts = conts.reshape(batch_size, -1)
cates = cates.reshape(batch_size, -1)
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# Compute and reshape combined embeddings
X = torch.cat((cont_embedding, cate_embedding), 1).reshape(batch_size, -1)
# Calculate L2
L2 = torch.pow(X, 2) * self.lamb
L2 = L2.sum()
return L2
class FM(nn.Module):
def __init__(self, cont_field, cate_field, cate_cont_feature,
orig_embedding_dim=40, hidden_dims=[100,100],
device=torch.device('cpu'), lamb=0.):
super(FM, self).__init__()
self.cont_field = cont_field
self.cate_field = cate_field
self.cate_cont_feature = cate_cont_feature
self.orig_embedding_dim = orig_embedding_dim
self.device = device
self.lamb = lamb
# Create embedding table
self.cate_embeddings_table = \
nn.Embedding(self.cate_cont_feature, self.orig_embedding_dim)
for name, tensor in self.cate_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.cate_field * self.orig_embedding_dim))
nn.init.uniform_(tensor, -a, a)
def forward(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == cates.size()[0]
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# FM part
cont_cate_embedding = torch.cat((cont_embedding, cate_embedding), 1)
square_of_sum = torch.sum(cont_cate_embedding, dim=1) ** 2
sum_of_square = torch.sum(cont_cate_embedding ** 2, dim=1)
ix = square_of_sum - sum_of_square
ix = 0.5 * ix
X_FM = torch.sum(ix, dim=1, keepdim=True)
logit = torch.sigmoid(X_FM)
return logit
def l2_penalty(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == (cates.size()[0])
conts = conts.reshape(batch_size, -1)
cates = cates.reshape(batch_size, -1)
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# Compute and reshape combined embeddings
X = torch.cat((cont_embedding, cate_embedding), 1).reshape(batch_size, -1)
# Calculate L2
L2 = torch.pow(X, 2) * self.lamb
L2 = L2.sum()
return L2
class Poly2(nn.Module):
def __init__(self, cont_field, cate_field, comb_field, cate_cont_feature, comb_feature,
device=torch.device('cpu'), lamb=0.):
super(Poly2, self).__init__()
self.cont_field = cont_field
self.cate_field = cate_field
self.comb_field = comb_field
self.cate_cont_feature = cate_cont_feature
self.comb_feature = comb_feature
self.device = device
self.lamb = lamb
# Create embedding table
self.cate_embeddings_table = \
nn.Embedding(self.cate_cont_feature, 1)
self.comb_embeddings_table = \
nn.Embedding(self.comb_feature, 1)
# Initialize
for name, tensor in self.cate_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.cate_field * 1))
nn.init.uniform_(tensor, -a, a)
for name, tensor in self.comb_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.comb_field * 1))
nn.init.uniform_(tensor, -a, a)
def forward(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == cates.size()[0]
assert batch_size == combs.size()[0]
# Get continuous, categorical and free combined embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# Reshape all embeddings
# Compute original features
cont_cate_embedding = torch.cat((cont_embedding, cate_embedding), 1) \
.type(torch.FloatTensor).to(self.device)
comb_embedding = self.comb_embeddings_table(combs)
# Compute final X as model input
X = torch.cat((cont_cate_embedding.reshape(batch_size, -1),
comb_embedding.reshape(batch_size, -1)), 1)\
.type(torch.FloatTensor).to(self.device)
# LR part
logit = torch.sigmoid(torch.sum(X, dim=1, keepdim=True))
return logit
def l2_penalty(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == (cates.size()[0])
conts = conts.reshape(batch_size, -1)
cates = cates.reshape(batch_size, -1)
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
comb_embedding = self.comb_embeddings_table(combs)
# Compute and reshape combined embeddings
X = torch.cat((cont_embedding, cate_embedding, comb_embedding), 1).reshape(batch_size, -1)
# Calculate L2
L2 = torch.pow(X, 2) * self.lamb
L2 = L2.sum()
return L2
class FNN(nn.Module):
def __init__(self, cont_field, cate_field, cate_cont_feature,
orig_embedding_dim=40, hidden_dims=[100,100],
device=torch.device('cpu'), lamb=0.):
super(FNN, self).__init__()
self.cont_field = cont_field
self.cate_field = cate_field
self.cate_cont_feature = cate_cont_feature
self.orig_embedding_dim = orig_embedding_dim
self.device = device
self.lamb = lamb
# Create embedding table
self.cate_embeddings_table = \
nn.Embedding(self.cate_cont_feature, self.orig_embedding_dim)
# Create layers
self.fc_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
first_layer_neurons = self.orig_embedding_dim * \
(self.cate_field + self.cont_field)
self.fc_layers.append(nn.Linear(first_layer_neurons, hidden_dims[0]))
for _, (in_size, out_size) in enumerate(zip(hidden_dims[:-1], hidden_dims[1:])):
self.fc_layers.append(nn.Linear(in_size, out_size))
for _, size in enumerate(hidden_dims):
self.norm_layers.append(nn.LayerNorm(size))
self.output_layer = nn.Linear(hidden_dims[-1], 1)
for name, tensor in self.fc_layers.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(tensor, gain=1)
for name, tensor in self.output_layer.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(tensor, gain=1)
for name, tensor in self.cate_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.cate_field * self.orig_embedding_dim))
nn.init.uniform_(tensor, -a, a)
def forward(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == (cates.size()[0])
conts = conts.reshape(batch_size, -1)
cates = cates.reshape(batch_size, -1)
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# Compute and reshape combined embeddings
X = torch.cat((cont_embedding, cate_embedding), 1).reshape(batch_size, -1)
# Pass to FC layers
for idx in range(len(self.fc_layers)):
X = self.fc_layers[idx](X)
X = self.norm_layers[idx](X)
X = F.relu(X)
logit = self.output_layer(X)
logit = torch.sigmoid(logit)
return logit
def l2_penalty(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == (cates.size()[0])
conts = conts.reshape(batch_size, -1)
cates = cates.reshape(batch_size, -1)
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# Compute and reshape combined embeddings
X = torch.cat((cont_embedding, cate_embedding), 1).reshape(batch_size, -1)
# Calculate L2
L2 = torch.pow(X, 2) * self.lamb
L2 = L2.sum()
return L2
class IPNN(nn.Module):
def __init__(self, cont_field, cate_field, cate_cont_feature,
orig_embedding_dim=40, hidden_dims=[100,100],
device=torch.device('cpu'), lamb=0.):
super(IPNN, self).__init__()
self.cont_field = cont_field
self.cate_field = cate_field
self.cate_cont_feature = cate_cont_feature
self.orig_embedding_dim = orig_embedding_dim
self.device = device
self.lamb = lamb
# Compute comb_field
self.cont_cate_field = self.cate_field + self.cont_field
self.comb_field = int(self.cont_cate_field * (self.cont_cate_field - 1) / 2)
# Create embedding table
self.cate_embeddings_table = \
nn.Embedding(self.cate_cont_feature, self.orig_embedding_dim)
# Create layers
self.fc_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
first_layer_neurons = self.orig_embedding_dim * \
(self.cate_field + self.cont_field) + self.comb_field
self.fc_layers.append(nn.Linear(first_layer_neurons, hidden_dims[0]))
for _, (in_size, out_size) in enumerate(zip(hidden_dims[:-1], hidden_dims[1:])):
self.fc_layers.append(nn.Linear(in_size, out_size))
for _, size in enumerate(hidden_dims):
self.norm_layers.append(nn.LayerNorm(size))
self.output_layer = nn.Linear(hidden_dims[-1], 1)
for name, tensor in self.fc_layers.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(tensor, gain=1)
for name, tensor in self.output_layer.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(tensor, gain=1)
for name, tensor in self.cate_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.cate_field * self.orig_embedding_dim))
nn.init.uniform_(tensor, -a, a)
# Create indexes
rows = []
cols = []
for i in range(self.cont_cate_field):
for j in range(i+1, self.cont_cate_field):
rows.append(i)
cols.append(j)
self.rows = torch.tensor(rows, device=self.device)
self.cols = torch.tensor(cols, device=self.device)
def forward(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == cates.size()[0]
# Get continuous and categorical embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
cont_cate_embedding = torch.cat((cont_embedding, cate_embedding), 1)
# Compute and reshape combined embeddings
trans = torch.transpose(cont_cate_embedding, 1, 2)
gather_rows = torch.gather(trans, 2, self.rows.expand(batch_size, trans.shape[1], self.rows.shape[0]))
gather_cols = torch.gather(trans, 2, self.cols.expand(batch_size, trans.shape[1], self.rows.shape[0]))
p = torch.transpose(gather_rows, 1, 2)
q = torch.transpose(gather_cols, 1, 2)
comp_comb_embedding = torch.mul(p, q)
comp_comb_embedding = torch.sum(comp_comb_embedding, 2)
cont_embedding = cont_embedding.reshape(batch_size, -1)
cate_embedding = cate_embedding.reshape(batch_size, -1)
X = torch.cat((cont_embedding, cate_embedding, comp_comb_embedding), 1)
# Pass to FC layers
for idx in range(len(self.fc_layers)):
X = self.fc_layers[idx](X)
X = self.norm_layers[idx](X)
X = F.relu(X)
logit = self.output_layer(X)
logit = torch.sigmoid(logit)
return logit
def l2_penalty(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == (cates.size()[0])
conts = conts.reshape(batch_size, -1)
cates = cates.reshape(batch_size, -1)
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# Compute and reshape combined embeddings
X = torch.cat((cont_embedding, cate_embedding), 1).reshape(batch_size, -1)
# Calculate L2
L2 = torch.pow(X, 2) * self.lamb
L2 = L2.sum()
return L2
class DeepFM(nn.Module):
def __init__(self, cont_field, cate_field, cate_cont_feature,
orig_embedding_dim=40, hidden_dims=[100,100],
device=torch.device('cpu'), lamb=0.):
super(DeepFM, self).__init__()
self.cont_field = cont_field
self.cate_field = cate_field
self.cate_cont_feature = cate_cont_feature
self.orig_embedding_dim = orig_embedding_dim
self.device = device
self.lamb = lamb
# Compute comb_field
self.cont_cate_field = self.cate_field + self.cont_field
# Create embedding table
self.cate_embeddings_table = \
nn.Embedding(self.cate_cont_feature, self.orig_embedding_dim)
# Deep part
self.fc_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
first_layer_neurons = self.orig_embedding_dim * self.cont_cate_field
self.fc_layers.append(nn.Linear(first_layer_neurons, hidden_dims[0]))
for _, (in_size, out_size) in enumerate(zip(hidden_dims[:-1], hidden_dims[1:])):
self.fc_layers.append(nn.Linear(in_size, out_size))
for _, size in enumerate(hidden_dims):
self.norm_layers.append(nn.LayerNorm(size))
self.output_layer = nn.Linear(hidden_dims[-1], 1)
for name, tensor in self.fc_layers.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(tensor, gain=1)
for name, tensor in self.output_layer.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(tensor, gain=1)
for name, tensor in self.cate_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.cate_field * self.orig_embedding_dim))
nn.init.uniform_(tensor, -a, a)
def forward(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == cates.size()[0]
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# FM part
cont_cate_embedding = torch.cat((cont_embedding, cate_embedding), 1)
square_of_sum = torch.sum(cont_cate_embedding, dim=1) ** 2
sum_of_square = torch.sum(cont_cate_embedding ** 2, dim=1)
ix = square_of_sum - sum_of_square
ix = 0.5 * ix
X_FM = torch.sum(ix, dim=1, keepdim=True)
# Deep part
cont_embedding = cont_embedding.reshape(batch_size, -1)
cate_embedding = cate_embedding.reshape(batch_size, -1)
X_DNN = torch.cat((cont_embedding, cate_embedding), 1)
for idx in range(len(self.fc_layers)):
X_DNN = self.fc_layers[idx](X_DNN)
X_DNN = self.norm_layers[idx](X_DNN)
X_DNN = F.relu(X_DNN)
X_DNN = self.output_layer(X_DNN)
logit = torch.sigmoid(X_FM + X_DNN)
return logit
def l2_penalty(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == (cates.size()[0])
conts = conts.reshape(batch_size, -1)
cates = cates.reshape(batch_size, -1)
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# Compute and reshape combined embeddings
X = torch.cat((cont_embedding, cate_embedding), 1).reshape(batch_size, -1)
# Calculate L2
L2 = torch.pow(X, 2) * self.lamb
L2 = L2.sum()
return L2
class PIN(nn.Module):
def __init__(self, cont_field, cate_field, cate_cont_feature,
orig_embedding_dim=40, hidden_dims=[100,100], subnet=[40,5],
device=torch.device('cpu'), lamb=0.):
super(PIN, self).__init__()
self.cont_field = cont_field
self.cate_field = cate_field
self.cate_cont_feature = cate_cont_feature
self.orig_embedding_dim = orig_embedding_dim
self.device = device
self.lamb = lamb
# Compute comb_field
self.cont_cate_field = self.cate_field + self.cont_field
self.comb_field = int(self.cont_cate_field * (self.cont_cate_field - 1) / 2)
# Create embedding table
self.cate_embeddings_table = \
nn.Embedding(self.cate_cont_feature, self.orig_embedding_dim)
# Create layers
self.fc_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
first_layer_neurons = self.comb_field * subnet[-1] + self.cont_cate_field * self.orig_embedding_dim
self.fc_layers.append(nn.Linear(first_layer_neurons, hidden_dims[0]))
for _, (in_size, out_size) in enumerate(zip(hidden_dims[:-1], hidden_dims[1:])):
self.fc_layers.append(nn.Linear(in_size, out_size))
for _, size in enumerate(hidden_dims):
self.norm_layers.append(nn.LayerNorm(size))
self.output_layer = nn.Linear(hidden_dims[-1], 1)
# Create sub-net
self.sub_norm_layers = nn.ModuleList()
self.sub_w = []
self.sub_b = []
layer_input = self.orig_embedding_dim * 3
for idx, layer_output in enumerate(subnet):
self.sub_w.append(torch.empty(self.comb_field, layer_input, layer_output, dtype=torch.float32, device=self.device, requires_grad=True))
self.sub_b.append(torch.empty(self.comb_field, 1, layer_output, dtype=torch.float32, device=self.device, requires_grad=True))
layer_input = layer_output
for _, size in enumerate(subnet):
self.sub_norm_layers.append(nn.LayerNorm(size))
# Initialization
for name, tensor in self.fc_layers.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(tensor, gain=1)
for name, tensor in self.output_layer.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(tensor, gain=1)
for name, tensor in self.cate_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.cate_field * self.orig_embedding_dim))
nn.init.uniform_(tensor, -a, a)
for idx in range(len(self.sub_w)):
nn.init.xavier_uniform(self.sub_w[idx], gain=1)
nn.init.xavier_uniform(self.sub_b[idx], gain=1)
# Create indexes
rows = []
cols = []
for i in range(self.cont_cate_field):
for j in range(i+1, self.cont_cate_field):
rows.append(i)
cols.append(j)
self.rows = torch.tensor(rows, device=self.device)
self.cols = torch.tensor(cols, device=self.device)
def forward(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == cates.size()[0]
# Get continuous and categorical embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
cont_cate_embedding = torch.cat((cont_embedding, cate_embedding), 1)
# Compute and reshape combined embeddings
trans = torch.transpose(cont_cate_embedding, 1, 2)
gather_rows = torch.gather(trans, 2, self.rows.expand(batch_size, trans.shape[1], self.rows.shape[0]))
gather_cols = torch.gather(trans, 2, self.cols.expand(batch_size, trans.shape[1], self.rows.shape[0]))
p = torch.transpose(gather_rows, 1, 2)
q = torch.transpose(gather_cols, 1, 2)
comp_comb_embedding = torch.mul(p, q)
z = torch.cat((p,q,comp_comb_embedding), 2)
z = torch.transpose(z, 0, 1)
for idx in range(len(self.sub_norm_layers)):
z = torch.matmul(z, self.sub_w[idx])
z = z + self.sub_b[idx]
z = self.sub_norm_layers[idx](z)
z = F.relu(z)
z = torch.transpose(z, 0, 1)
z = z.reshape(batch_size, -1)
cont_cate_embedding = cont_cate_embedding.reshape(batch_size, -1)
X = torch.cat((cont_cate_embedding, z), 1)
# Pass to FC layers
for idx in range(len(self.fc_layers)):
X = self.fc_layers[idx](X)
X = self.norm_layers[idx](X)
X = F.relu(X)
logit = self.output_layer(X)
logit = torch.sigmoid(logit)
return logit
def l2_penalty(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == (cates.size()[0])
conts = conts.reshape(batch_size, -1)
cates = cates.reshape(batch_size, -1)
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# Compute and reshape combined embeddings
X = torch.cat((cont_embedding, cate_embedding), 1).reshape(batch_size, -1)
# Calculate L2
L2 = torch.pow(X, 2) * self.lamb
L2 = L2.sum()
return L2
class DNN_cart(nn.Module):
def __init__(self, cont_field, cate_field, comb_field, cate_cont_feature, comb_feature,
arch=0, orig_embedding_dim=40, comb_embedding_dim=10, hidden_dims=[100,100],
device=torch.device('cpu'), alpha_mode=0, selected_pairs=None, lamb=0.):
super(DNN_cart, self).__init__()
self.cont_field = cont_field
self.cate_field = cate_field
self.comb_field = comb_field
self.cate_cont_feature = cate_cont_feature
self.comb_feature = comb_feature
self.orig_embedding_dim = orig_embedding_dim
self.comb_embedding_dim = comb_embedding_dim
self.device = device
self.alpha_mode = alpha_mode
if self.alpha_mode == 0:
self.arch = torch.from_numpy(arch).to(self.device)
if self.alpha_mode in [0,2]:
if selected_pairs == None:
self.selected_pairs = []
cont_cate_fields = self.cont_field + self.cate_field
for i in range(cont_cate_fields):
for j in range(i+1, cont_cate_fields):
self.selected_pairs.append((i,j))
else:
self.selected_pairs = selected_pairs
self.lamb = lamb
# Create embedding tables
self.cate_embeddings_table = \
nn.Embedding(self.cate_cont_feature, self.orig_embedding_dim)
if self.alpha_mode in [0,1]:
self.comb_embeddings_table = \
nn.Embedding(self.comb_feature, self.comb_embedding_dim)
if self.alpha_mode in [0,2]:
self.addition_embeddings_table = \
nn.Embedding(self.cate_cont_feature, self.comb_embedding_dim)
# Create layers
self.fc_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
first_layer_neurons = self.orig_embedding_dim * \
(self.cate_field + self.cont_field) + self.comb_embedding_dim * self.comb_field
self.fc_layers.append(nn.Linear(first_layer_neurons, hidden_dims[0]))
for _, (in_size, out_size) in enumerate(zip(hidden_dims[:-1], hidden_dims[1:])):
self.fc_layers.append(nn.Linear(in_size, out_size))
for _, size in enumerate(hidden_dims):
self.norm_layers.append(nn.LayerNorm(size))
self.output_layer = nn.Linear(hidden_dims[-1], 1)
for name, tensor in self.fc_layers.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(tensor, gain=1)
for name, tensor in self.output_layer.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(tensor, gain=1)
for name, tensor in self.cate_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.cate_field * self.orig_embedding_dim))
nn.init.uniform_(tensor, -a, a)
if hasattr(self, 'addition_embeddings_table'):
for name, tensor in self.addition_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.cate_field * self.comb_embedding_dim))
nn.init.uniform_(tensor, -a, a)
if hasattr(self, 'comb_embeddings_table'):
for name, tensor in self.comb_embeddings_table.named_parameters():
if 'weight' in name:
a = np.square(3/(self.comb_field * self.comb_embedding_dim))
nn.init.uniform_(tensor, -a, a)
def forward(self, conts, cates, combs):
# Assert the batch sizes are the same
batch_size = conts.size()[0]
assert batch_size == cates.size()[0]
assert batch_size == combs.size()[0]
# Get continuous, categorical and free combinad embeddings
cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
.expand_as(conts).to(self.device)
cont_embedding = self.cate_embeddings_table(cont_embedding)
conts = conts.unsqueeze(2)
cont_embedding = torch.mul(cont_embedding, conts)
cate_embedding = self.cate_embeddings_table(cates)
# Reshape all embeddings
# Compute original features
cont_cate_embedding = torch.cat((cont_embedding, cate_embedding), 1) \
.type(torch.FloatTensor).to(self.device)
# Expand combined embedding dimension
if self.alpha_mode in [0,1]:
comb_embedding = self.comb_embeddings_table(combs)
# Compute combined embeddings
if self.alpha_mode in [0,2]:
addition_cate_embedding = self.addition_embeddings_table(cates)
for index, (i,j) in enumerate(self.selected_pairs):
embedding_i = addition_cate_embedding[:,i]
embedding_j = addition_cate_embedding[:,j]
if index == 0:
comp_comb_embedding = embedding_i.mul(embedding_j)\
.unsqueeze(1)
else:
comp_comb_embedding = torch.cat((comp_comb_embedding, \
embedding_i.mul(embedding_j).unsqueeze(1)), 1)
# Null embedding
if self.alpha_mode in [0,3]:
null_embedding = torch.zeros(batch_size, self.comb_field,
self.comb_embedding_dim, device=self.device)
# Compute final combined embedding
if self.alpha_mode == 0:
final_comb_embedding = comb_embedding.mul(self.arch[:,0].unsqueeze(0).unsqueeze(2)) \
+ comp_comb_embedding.mul(self.arch[:,1].unsqueeze(0).unsqueeze(2)) \
+ null_embedding.mul(self.arch[:,2].unsqueeze(0).unsqueeze(2))
elif self.alpha_mode == 1:
final_comb_embedding = comb_embedding
elif self.alpha_mode == 2:
final_comb_embedding = comp_comb_embedding
elif self.alpha_mode == 3:
final_comb_embedding = null_embedding
# Compute final X as model input
X = torch.cat((cont_cate_embedding.reshape(batch_size, -1),
final_comb_embedding.reshape(batch_size, -1)), 1)\
.type(torch.FloatTensor).to(self.device)
# Pass to FC layers
for idx in range(len(self.fc_layers)):
X = self.fc_layers[idx](X)
X = self.norm_layers[idx](X)
X = F.relu(X)
logit = self.output_layer(X)
logit = torch.sigmoid(logit)
return logit
# def l2_penalty(self, conts, cates, combs):
# # Assert the batch sizes are the same
# batch_size = conts.size()[0]
# assert batch_size == cates.size()[0]
# assert batch_size == combs.size()[0]
# # Get continuous, categorical and free combinad embeddings
# cont_embedding = torch.IntTensor(np.arange(self.cont_field))\
# .expand_as(conts).to(self.device)
# cont_embedding = self.cate_embeddings_table(cont_embedding)
# conts = conts.unsqueeze(2)
# cont_embedding = torch.mul(cont_embedding, conts)
# cate_embedding = self.cate_embeddings_table(cates)
# # Reshape all embeddings
# # Compute original features
# cont_cate_embedding = torch.cat((cont_embedding, cate_embedding), 1) \
# .type(torch.FloatTensor).to(self.device)
# # Expand combined embedding dimension
# if self.alpha_mode in [0,1]:
# comb_embedding = self.comb_embeddings_table(combs)
# # Compute combined embeddings
# if self.alpha_mode in [0,2]:
# addition_cate_embedding = self.addition_embeddings_table(cates)
# for index, (i,j) in enumerate(self.selected_pairs):
# embedding_i = addition_cate_embedding[:,i]
# embedding_j = addition_cate_embedding[:,j]
# if index == 0:
# comp_comb_embedding = embedding_i.mul(embedding_j)\
# .unsqueeze(1)
# else:
# comp_comb_embedding = torch.cat((comp_comb_embedding, \
# embedding_i.mul(embedding_j).unsqueeze(1)), 1)
# # Null embedding
# if self.alpha_mode in [0,3]:
# null_embedding = torch.zeros(batch_size, self.comb_field,
# self.comb_embedding_dim, device=self.device)
# # Compute final combined embedding
# if self.alpha_mode == 0:
# final_comb_embedding = comb_embedding.mul(self.arch[:,0].unsqueeze(0).unsqueeze(2)) \
# + comp_comb_embedding.mul(self.arch[:,1].unsqueeze(0).unsqueeze(2)) \
# + null_embedding.mul(self.arch[:,2].unsqueeze(0).unsqueeze(2))
# elif self.alpha_mode == 1:
# final_comb_embedding = comb_embedding
# elif self.alpha_mode == 2:
# final_comb_embedding = comp_comb_embedding
# elif self.alpha_mode == 3:
# final_comb_embedding = null_embedding
# # Compute and reshape combined embeddings
# X = torch.cat((cont_cate_embedding.reshape(batch_size, -1),
# final_comb_embedding.reshape(batch_size, -1)), 1)\
# .type(torch.FloatTensor).to(self.device)
# # Calculate L2
# L2 = torch.pow(X, 2) * self.lamb
# L2 = L2.sum()
# return L2
##### Model and Alpha Mode #####
# Model: DNN_cart
# 0: using pre-searched architecture,
# feature combinations including cartesian product, IPNN or null
# 1: only using cartesian product to model feature combination
# 2: only using original embedding to compute feature combination
# 3: do not model feature combination, equal to FNN
# Model: IPNN
# Model: FNN
# Model: FM
# Model: PIN
##### ========== #####
def getmodel(model_name, cont_field, cate_field, cate_cont_feature, comb_feature,
comb_field=0, arch=0, orig_embedding_dim=40, comb_embedding_dim=40,
hidden_dims=[500,500,500,500,500], device=torch.device('cpu'), alpha_mode=1,
lamb=0., selected_pairs=None, id_offsets=None):
if model_name == 'DNN_cart':
model = DNN_cart(cont_field, cate_field, comb_field, cate_cont_feature,
comb_feature, arch=arch, orig_embedding_dim=orig_embedding_dim,
comb_embedding_dim=comb_embedding_dim, hidden_dims=hidden_dims,
device=device, alpha_mode=alpha_mode, selected_pairs=selected_pairs,
lamb=lamb)
elif model_name == 'LR':
model = LR(cont_field, cate_field, cate_cont_feature, device=device, lamb=lamb)
elif model_name == 'FM':
model = FM(cont_field, cate_field, cate_cont_feature, device=device,
orig_embedding_dim=orig_embedding_dim, hidden_dims=hidden_dims, lamb=lamb)
elif model_name == 'Poly2':
model = Poly2(cont_field, cate_field, comb_field, cate_cont_feature,
comb_feature, device=device, lamb=lamb)
elif model_name == 'IPNN':
model = IPNN(cont_field, cate_field, cate_cont_feature, device=device,
orig_embedding_dim=orig_embedding_dim, hidden_dims=hidden_dims, lamb=lamb)
elif model_name == 'FNN':
model = FNN(cont_field, cate_field, cate_cont_feature, device=device,
orig_embedding_dim=orig_embedding_dim, hidden_dims=hidden_dims, lamb=lamb)
elif model_name == 'DeepFM':
model = DeepFM(cont_field, cate_field, cate_cont_feature, device=device,
orig_embedding_dim=orig_embedding_dim, hidden_dims=hidden_dims, lamb=lamb)
elif model_name == 'PIN':
model = PIN(cont_field, cate_field, cate_cont_feature, device=device,
orig_embedding_dim=orig_embedding_dim, hidden_dims=hidden_dims, lamb=lamb)
return model
| 41.526316
| 147
| 0.626464
| 5,089
| 39,450
| 4.607978
| 0.038318
| 0.056546
| 0.033433
| 0.047079
| 0.938806
| 0.926482
| 0.907292
| 0.897058
| 0.885629
| 0.874499
| 0
| 0.014838
| 0.270545
| 39,450
| 949
| 148
| 41.570074
| 0.800049
| 0.142053
| 0
| 0.790735
| 0
| 0
| 0.006892
| 0.001367
| 0
| 0
| 0
| 0
| 0.027157
| 1
| 0.038339
| false
| 0
| 0.009585
| 0
| 0.086262
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cebb26af08f259dd47ff7e6a097c2b3b31db7a18
| 6,269
|
py
|
Python
|
loldib/getratings/models/NA/na_sona/na_sona_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_sona/na_sona_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_sona/na_sona_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Sona_Jng_Aatrox(Ratings):
pass
class NA_Sona_Jng_Ahri(Ratings):
pass
class NA_Sona_Jng_Akali(Ratings):
pass
class NA_Sona_Jng_Alistar(Ratings):
pass
class NA_Sona_Jng_Amumu(Ratings):
pass
class NA_Sona_Jng_Anivia(Ratings):
pass
class NA_Sona_Jng_Annie(Ratings):
pass
class NA_Sona_Jng_Ashe(Ratings):
pass
class NA_Sona_Jng_AurelionSol(Ratings):
pass
class NA_Sona_Jng_Azir(Ratings):
pass
class NA_Sona_Jng_Bard(Ratings):
pass
class NA_Sona_Jng_Blitzcrank(Ratings):
pass
class NA_Sona_Jng_Brand(Ratings):
pass
class NA_Sona_Jng_Braum(Ratings):
pass
class NA_Sona_Jng_Caitlyn(Ratings):
pass
class NA_Sona_Jng_Camille(Ratings):
pass
class NA_Sona_Jng_Cassiopeia(Ratings):
pass
class NA_Sona_Jng_Chogath(Ratings):
pass
class NA_Sona_Jng_Corki(Ratings):
pass
class NA_Sona_Jng_Darius(Ratings):
pass
class NA_Sona_Jng_Diana(Ratings):
pass
class NA_Sona_Jng_Draven(Ratings):
pass
class NA_Sona_Jng_DrMundo(Ratings):
pass
class NA_Sona_Jng_Ekko(Ratings):
pass
class NA_Sona_Jng_Elise(Ratings):
pass
class NA_Sona_Jng_Evelynn(Ratings):
pass
class NA_Sona_Jng_Ezreal(Ratings):
pass
class NA_Sona_Jng_Fiddlesticks(Ratings):
pass
class NA_Sona_Jng_Fiora(Ratings):
pass
class NA_Sona_Jng_Fizz(Ratings):
pass
class NA_Sona_Jng_Galio(Ratings):
pass
class NA_Sona_Jng_Gangplank(Ratings):
pass
class NA_Sona_Jng_Garen(Ratings):
pass
class NA_Sona_Jng_Gnar(Ratings):
pass
class NA_Sona_Jng_Gragas(Ratings):
pass
class NA_Sona_Jng_Graves(Ratings):
pass
class NA_Sona_Jng_Hecarim(Ratings):
pass
class NA_Sona_Jng_Heimerdinger(Ratings):
pass
class NA_Sona_Jng_Illaoi(Ratings):
pass
class NA_Sona_Jng_Irelia(Ratings):
pass
class NA_Sona_Jng_Ivern(Ratings):
pass
class NA_Sona_Jng_Janna(Ratings):
pass
class NA_Sona_Jng_JarvanIV(Ratings):
pass
class NA_Sona_Jng_Jax(Ratings):
pass
class NA_Sona_Jng_Jayce(Ratings):
pass
class NA_Sona_Jng_Jhin(Ratings):
pass
class NA_Sona_Jng_Jinx(Ratings):
pass
class NA_Sona_Jng_Kalista(Ratings):
pass
class NA_Sona_Jng_Karma(Ratings):
pass
class NA_Sona_Jng_Karthus(Ratings):
pass
class NA_Sona_Jng_Kassadin(Ratings):
pass
class NA_Sona_Jng_Katarina(Ratings):
pass
class NA_Sona_Jng_Kayle(Ratings):
pass
class NA_Sona_Jng_Kayn(Ratings):
pass
class NA_Sona_Jng_Kennen(Ratings):
pass
class NA_Sona_Jng_Khazix(Ratings):
pass
class NA_Sona_Jng_Kindred(Ratings):
pass
class NA_Sona_Jng_Kled(Ratings):
pass
class NA_Sona_Jng_KogMaw(Ratings):
pass
class NA_Sona_Jng_Leblanc(Ratings):
pass
class NA_Sona_Jng_LeeSin(Ratings):
pass
class NA_Sona_Jng_Leona(Ratings):
pass
class NA_Sona_Jng_Lissandra(Ratings):
pass
class NA_Sona_Jng_Lucian(Ratings):
pass
class NA_Sona_Jng_Lulu(Ratings):
pass
class NA_Sona_Jng_Lux(Ratings):
pass
class NA_Sona_Jng_Malphite(Ratings):
pass
class NA_Sona_Jng_Malzahar(Ratings):
pass
class NA_Sona_Jng_Maokai(Ratings):
pass
class NA_Sona_Jng_MasterYi(Ratings):
pass
class NA_Sona_Jng_MissFortune(Ratings):
pass
class NA_Sona_Jng_MonkeyKing(Ratings):
pass
class NA_Sona_Jng_Mordekaiser(Ratings):
pass
class NA_Sona_Jng_Morgana(Ratings):
pass
class NA_Sona_Jng_Nami(Ratings):
pass
class NA_Sona_Jng_Nasus(Ratings):
pass
class NA_Sona_Jng_Nautilus(Ratings):
pass
class NA_Sona_Jng_Nidalee(Ratings):
pass
class NA_Sona_Jng_Nocturne(Ratings):
pass
class NA_Sona_Jng_Nunu(Ratings):
pass
class NA_Sona_Jng_Olaf(Ratings):
pass
class NA_Sona_Jng_Orianna(Ratings):
pass
class NA_Sona_Jng_Ornn(Ratings):
pass
class NA_Sona_Jng_Pantheon(Ratings):
pass
class NA_Sona_Jng_Poppy(Ratings):
pass
class NA_Sona_Jng_Quinn(Ratings):
pass
class NA_Sona_Jng_Rakan(Ratings):
pass
class NA_Sona_Jng_Rammus(Ratings):
pass
class NA_Sona_Jng_RekSai(Ratings):
pass
class NA_Sona_Jng_Renekton(Ratings):
pass
class NA_Sona_Jng_Rengar(Ratings):
pass
class NA_Sona_Jng_Riven(Ratings):
pass
class NA_Sona_Jng_Rumble(Ratings):
pass
class NA_Sona_Jng_Ryze(Ratings):
pass
class NA_Sona_Jng_Sejuani(Ratings):
pass
class NA_Sona_Jng_Shaco(Ratings):
pass
class NA_Sona_Jng_Shen(Ratings):
pass
class NA_Sona_Jng_Shyvana(Ratings):
pass
class NA_Sona_Jng_Singed(Ratings):
pass
class NA_Sona_Jng_Sion(Ratings):
pass
class NA_Sona_Jng_Sivir(Ratings):
pass
class NA_Sona_Jng_Skarner(Ratings):
pass
class NA_Sona_Jng_Sona(Ratings):
pass
class NA_Sona_Jng_Soraka(Ratings):
pass
class NA_Sona_Jng_Swain(Ratings):
pass
class NA_Sona_Jng_Syndra(Ratings):
pass
class NA_Sona_Jng_TahmKench(Ratings):
pass
class NA_Sona_Jng_Taliyah(Ratings):
pass
class NA_Sona_Jng_Talon(Ratings):
pass
class NA_Sona_Jng_Taric(Ratings):
pass
class NA_Sona_Jng_Teemo(Ratings):
pass
class NA_Sona_Jng_Thresh(Ratings):
pass
class NA_Sona_Jng_Tristana(Ratings):
pass
class NA_Sona_Jng_Trundle(Ratings):
pass
class NA_Sona_Jng_Tryndamere(Ratings):
pass
class NA_Sona_Jng_TwistedFate(Ratings):
pass
class NA_Sona_Jng_Twitch(Ratings):
pass
class NA_Sona_Jng_Udyr(Ratings):
pass
class NA_Sona_Jng_Urgot(Ratings):
pass
class NA_Sona_Jng_Varus(Ratings):
pass
class NA_Sona_Jng_Vayne(Ratings):
pass
class NA_Sona_Jng_Veigar(Ratings):
pass
class NA_Sona_Jng_Velkoz(Ratings):
pass
class NA_Sona_Jng_Vi(Ratings):
pass
class NA_Sona_Jng_Viktor(Ratings):
pass
class NA_Sona_Jng_Vladimir(Ratings):
pass
class NA_Sona_Jng_Volibear(Ratings):
pass
class NA_Sona_Jng_Warwick(Ratings):
pass
class NA_Sona_Jng_Xayah(Ratings):
pass
class NA_Sona_Jng_Xerath(Ratings):
pass
class NA_Sona_Jng_XinZhao(Ratings):
pass
class NA_Sona_Jng_Yasuo(Ratings):
pass
class NA_Sona_Jng_Yorick(Ratings):
pass
class NA_Sona_Jng_Zac(Ratings):
pass
class NA_Sona_Jng_Zed(Ratings):
pass
class NA_Sona_Jng_Ziggs(Ratings):
pass
class NA_Sona_Jng_Zilean(Ratings):
pass
class NA_Sona_Jng_Zyra(Ratings):
pass
| 15.033573
| 46
| 0.75642
| 972
| 6,269
| 4.452675
| 0.151235
| 0.223198
| 0.350739
| 0.446396
| 0.791359
| 0.791359
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177221
| 6,269
| 416
| 47
| 15.069712
| 0.839085
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
0cb079d7f1e3e0d565a5dc47803b5e2f83b122ef
| 12,522
|
py
|
Python
|
src/lms-harmonizer/tests_integration_sql/procs/mssql/harmonize_assignment_submissions/test_lmsassignment_submissions.py
|
markramonDL/LMS-Toolkit
|
d7097f9e063f39a45c8a08ec7316d2a1c4034e50
|
[
"Apache-2.0"
] | null | null | null |
src/lms-harmonizer/tests_integration_sql/procs/mssql/harmonize_assignment_submissions/test_lmsassignment_submissions.py
|
markramonDL/LMS-Toolkit
|
d7097f9e063f39a45c8a08ec7316d2a1c4034e50
|
[
"Apache-2.0"
] | null | null | null |
src/lms-harmonizer/tests_integration_sql/procs/mssql/harmonize_assignment_submissions/test_lmsassignment_submissions.py
|
markramonDL/LMS-Toolkit
|
d7097f9e063f39a45c8a08ec7316d2a1c4034e50
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from tests_integration_sql.mssql_loader import (
insert_edfi_student,
insert_lms_assignment,
insert_lms_section,
insert_edfi_section,
insert_descriptor,
insert_lmsx_sourcesystem_descriptor,
insert_lmsx_assignmentcategory_descriptor,
insert_lms_assignment_submissions,
insert_lms_user,
insert_lmsx_assignmentsubmissionstatus_descriptor,
)
from tests_integration_sql.mssql_connection import MSSqlConnection, query
from tests_integration_sql.server_config import ServerConfig
from tests_integration_sql.orchestrator import run_harmonizer
SOURCE_SYSTEM = "Test_LMS"
DESCRIPTOR_NAMESPACE = (
"uri://ed-fi.org/edfilms/AssignmentCategoryDescriptor/" + SOURCE_SYSTEM
)
SUBMISSION_STATUS_DESCRIPTOR_NAMESPACE = (
"uri://ed-fi.org/edfilms/SubmissionStatusDescriptor/" + SOURCE_SYSTEM
)
USER_TEST_EMAIL = "test@email.email"
def describe_when_lms_and_ods_tables_are_both_empty():
def it_should_run_successfully(test_db_config: ServerConfig):
# act
run_harmonizer(test_db_config)
# assert - no errors
def describe_when_there_are_assignment_submissions_to_insert():
SIS_SECTION_ID = "sis_section_id"
ASSIGNMENT_SOURCE_SYSTEM_IDENTIFIER = "assignment_identifier"
ASSIGNMENT_CATEGORY = "test_category"
ASSIGNMENT_SUBMISSION_STATUS = "test_submission_status"
USER_SIS_ID = "test_sis_id"
SUBMISSION_TEST_IDENTIFIER = "submission_test_identifier"
SUBMISSION_TEST_LMS_IDENTIFIER = 99
def it_should_insert_the_submissions_successfully(test_db_config: ServerConfig):
# arrange
with MSSqlConnection(test_db_config).pyodbc_conn() as connection:
insert_descriptor(connection, DESCRIPTOR_NAMESPACE, ASSIGNMENT_CATEGORY)
insert_lmsx_assignmentcategory_descriptor(connection, 1)
insert_descriptor(connection, DESCRIPTOR_NAMESPACE, SOURCE_SYSTEM)
insert_lmsx_sourcesystem_descriptor(connection, 2)
insert_descriptor(
connection,
SUBMISSION_STATUS_DESCRIPTOR_NAMESPACE,
ASSIGNMENT_SUBMISSION_STATUS,
)
insert_lmsx_assignmentsubmissionstatus_descriptor(connection, 3)
insert_lms_section(connection, SIS_SECTION_ID, SOURCE_SYSTEM)
insert_edfi_section(connection, SIS_SECTION_ID)
connection.execute(
"""UPDATE LMS.LMSSECTION SET
EdFiSectionId = (SELECT TOP 1 ID FROM EDFI.SECTION)"""
)
insert_lms_assignment(
connection,
ASSIGNMENT_SOURCE_SYSTEM_IDENTIFIER,
SOURCE_SYSTEM,
1,
ASSIGNMENT_CATEGORY,
)
insert_lms_user(connection, USER_SIS_ID, USER_TEST_EMAIL, SOURCE_SYSTEM)
insert_edfi_student(connection, USER_SIS_ID)
connection.execute(
"""UPDATE LMS.LMSUSER SET
EdFiStudentId = (SELECT TOP 1 ID FROM EDFI.Student)"""
)
insert_lms_assignment_submissions(
connection,
SUBMISSION_TEST_LMS_IDENTIFIER,
SUBMISSION_TEST_IDENTIFIER,
1,
1,
ASSIGNMENT_SUBMISSION_STATUS,
SOURCE_SYSTEM,
False,
)
# act
run_harmonizer(test_db_config)
# assert
with MSSqlConnection(test_db_config).pyodbc_conn() as connection:
LMSAssignmentSubmission = query(
connection, "SELECT * from [lmsx].[AssignmentSubmission]"
)
assert len(LMSAssignmentSubmission) == 1
assert (
int(LMSAssignmentSubmission[0]["AssignmentSubmissionIdentifier"]) == SUBMISSION_TEST_LMS_IDENTIFIER
)
def describe_when_there_are_assignment_submissions_to_update():
SIS_SECTION_ID = "sis_section_id"
ASSIGNMENT_SOURCE_SYSTEM_IDENTIFIER = "assignment_identifier"
ASSIGNMENT_CATEGORY = "test_category"
ASSIGNMENT_SUBMISSION_STATUS = "test_submission_status"
USER_SIS_ID = "test_sis_id"
SUBMISSION_TEST_IDENTIFIER = "submission_test_identifier"
SUBMISSION_TEST_LMS_IDENTIFIER = 99
SUBMISSION_GRADE = '85'
def it_should_update_the_submissions_successfully(test_db_config: ServerConfig):
# arrange
with MSSqlConnection(test_db_config).pyodbc_conn() as connection:
insert_descriptor(connection, DESCRIPTOR_NAMESPACE, ASSIGNMENT_CATEGORY)
insert_lmsx_assignmentcategory_descriptor(connection, 1)
insert_descriptor(connection, DESCRIPTOR_NAMESPACE, SOURCE_SYSTEM)
insert_lmsx_sourcesystem_descriptor(connection, 2)
insert_descriptor(
connection,
SUBMISSION_STATUS_DESCRIPTOR_NAMESPACE,
ASSIGNMENT_SUBMISSION_STATUS,
)
insert_lmsx_assignmentsubmissionstatus_descriptor(connection, 3)
insert_lms_section(connection, SIS_SECTION_ID, SOURCE_SYSTEM)
insert_edfi_section(connection, SIS_SECTION_ID)
connection.execute(
"""UPDATE LMS.LMSSECTION SET
EdFiSectionId = (SELECT TOP 1 ID FROM EDFI.SECTION)"""
)
insert_lms_assignment(
connection,
ASSIGNMENT_SOURCE_SYSTEM_IDENTIFIER,
SOURCE_SYSTEM,
1,
ASSIGNMENT_CATEGORY,
)
insert_lms_user(connection, USER_SIS_ID, USER_TEST_EMAIL, SOURCE_SYSTEM)
insert_edfi_student(connection, USER_SIS_ID)
connection.execute(
"""UPDATE LMS.LMSUSER SET
EdFiStudentId = (SELECT TOP 1 ID FROM EDFI.Student)"""
)
insert_lms_assignment_submissions(
connection,
SUBMISSION_TEST_LMS_IDENTIFIER,
SUBMISSION_TEST_IDENTIFIER,
1,
1,
ASSIGNMENT_SUBMISSION_STATUS,
SOURCE_SYSTEM,
False,
)
run_harmonizer(test_db_config)
with MSSqlConnection(test_db_config).pyodbc_conn() as connection:
connection.execute(
F"""
UPDATE LMS.ASSIGNMENTSUBMISSION SET
GRADE=N'{SUBMISSION_GRADE}',
LASTMODIFIEDDATE=GETDATE()"""
) # In the first insert it is set to 0
# act
run_harmonizer(test_db_config)
# assert
with MSSqlConnection(test_db_config).pyodbc_conn() as connection:
LMSAssignmentSubmission = query(
connection, "SELECT * from [lmsx].[AssignmentSubmission]"
)
assert LMSAssignmentSubmission[0]["Grade"] == SUBMISSION_GRADE
def describe_when_there_are_assignment_submissions_for_deleted_assignments():
SIS_SECTION_ID = "sis_section_id"
ASSIGNMENT_SOURCE_SYSTEM_IDENTIFIER = "assignment_identifier"
ASSIGNMENT_CATEGORY = "test_category"
ASSIGNMENT_SUBMISSION_STATUS = "test_submission_status"
USER_SIS_ID = "test_sis_id"
SUBMISSION_TEST_IDENTIFIER = "submission_test_identifier"
SUBMISSION_TEST_LMS_IDENTIFIER = 99
def it_should_not_insert_the_submissions(test_db_config: ServerConfig):
# arrange
with MSSqlConnection(test_db_config).pyodbc_conn() as connection:
insert_descriptor(connection, DESCRIPTOR_NAMESPACE, ASSIGNMENT_CATEGORY)
insert_lmsx_assignmentcategory_descriptor(connection, 1)
insert_descriptor(connection, DESCRIPTOR_NAMESPACE, SOURCE_SYSTEM)
insert_lmsx_sourcesystem_descriptor(connection, 2)
insert_descriptor(
connection,
SUBMISSION_STATUS_DESCRIPTOR_NAMESPACE,
ASSIGNMENT_SUBMISSION_STATUS,
)
insert_lmsx_assignmentsubmissionstatus_descriptor(connection, 3)
insert_lms_section(connection, SIS_SECTION_ID, SOURCE_SYSTEM)
insert_edfi_section(connection, SIS_SECTION_ID)
connection.execute(
"""UPDATE LMS.LMSSECTION SET
EdFiSectionId = (SELECT TOP 1 ID FROM EDFI.SECTION)"""
)
insert_lms_assignment(
connection,
ASSIGNMENT_SOURCE_SYSTEM_IDENTIFIER,
SOURCE_SYSTEM,
1,
ASSIGNMENT_CATEGORY,
)
insert_lms_user(connection, USER_SIS_ID, USER_TEST_EMAIL, SOURCE_SYSTEM)
insert_edfi_student(connection, USER_SIS_ID)
connection.execute(
"""UPDATE LMS.LMSUSER SET
EdFiStudentId = (SELECT TOP 1 ID FROM EDFI.Student)"""
)
insert_lms_assignment_submissions(
connection,
SUBMISSION_TEST_LMS_IDENTIFIER,
SUBMISSION_TEST_IDENTIFIER,
1,
1,
ASSIGNMENT_SUBMISSION_STATUS,
SOURCE_SYSTEM,
True, # deleted = True
)
# act
run_harmonizer(test_db_config)
# assert
with MSSqlConnection(test_db_config).pyodbc_conn() as connection:
LMSAssignmentSubmission = query(
connection, "SELECT * from [lmsx].[AssignmentSubmission]"
)
assert len(LMSAssignmentSubmission) == 0
def describe_when_there_are_lmsx_assignment_submissions_and_lms_assignment_is_deleted():
SIS_SECTION_ID = "sis_section_id"
ASSIGNMENT_SOURCE_SYSTEM_IDENTIFIER = "assignment_identifier"
ASSIGNMENT_CATEGORY = "test_category"
ASSIGNMENT_SUBMISSION_STATUS = "test_submission_status"
USER_SIS_ID = "test_sis_id"
SUBMISSION_TEST_IDENTIFIER = "submission_test_identifier"
SUBMISSION_TEST_LMS_IDENTIFIER = 99
def it_should_not_insert_any_submissions(test_db_config: ServerConfig):
# arrange
with MSSqlConnection(test_db_config).pyodbc_conn() as connection:
insert_descriptor(connection, DESCRIPTOR_NAMESPACE, ASSIGNMENT_CATEGORY)
insert_lmsx_assignmentcategory_descriptor(connection, 1)
insert_descriptor(connection, DESCRIPTOR_NAMESPACE, SOURCE_SYSTEM)
insert_lmsx_sourcesystem_descriptor(connection, 2)
insert_descriptor(
connection,
SUBMISSION_STATUS_DESCRIPTOR_NAMESPACE,
ASSIGNMENT_SUBMISSION_STATUS,
)
insert_lmsx_assignmentsubmissionstatus_descriptor(connection, 3)
insert_lms_section(connection, SIS_SECTION_ID, SOURCE_SYSTEM)
insert_edfi_section(connection, SIS_SECTION_ID)
connection.execute(
"""UPDATE LMS.LMSSECTION SET
EdFiSectionId = (SELECT TOP 1 ID FROM EDFI.SECTION)"""
)
insert_lms_assignment(
connection,
ASSIGNMENT_SOURCE_SYSTEM_IDENTIFIER,
SOURCE_SYSTEM,
1,
ASSIGNMENT_CATEGORY,
)
insert_lms_user(connection, USER_SIS_ID, USER_TEST_EMAIL, SOURCE_SYSTEM)
insert_edfi_student(connection, USER_SIS_ID)
connection.execute(
"""UPDATE LMS.LMSUSER SET
EdFiStudentId = (SELECT TOP 1 ID FROM EDFI.Student)"""
)
insert_lms_assignment_submissions(
connection,
SUBMISSION_TEST_LMS_IDENTIFIER,
SUBMISSION_TEST_IDENTIFIER,
1,
1,
ASSIGNMENT_SUBMISSION_STATUS,
SOURCE_SYSTEM,
False,
)
run_harmonizer(test_db_config)
connection.execute("update lms.assignment set deletedat = GETDATE()")
# act
run_harmonizer(test_db_config)
# assert
with MSSqlConnection(test_db_config).pyodbc_conn() as connection:
LMSAssignmentSubmission = query(
connection, "SELECT * from [lmsx].[AssignmentSubmission]"
)
assert len(LMSAssignmentSubmission) == 0
| 36.938053
| 115
| 0.646862
| 1,177
| 12,522
| 6.449448
| 0.109601
| 0.049005
| 0.033197
| 0.032012
| 0.837571
| 0.818469
| 0.818469
| 0.798709
| 0.786589
| 0.779607
| 0
| 0.005906
| 0.296838
| 12,522
| 338
| 116
| 37.047337
| 0.856218
| 0.032343
| 0
| 0.713115
| 0
| 0
| 0.085076
| 0.053527
| 0
| 0
| 0
| 0
| 0.020492
| 1
| 0.040984
| false
| 0
| 0.016393
| 0
| 0.057377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b32ac7b709b006a42cb96b35a59aa0a9213a083
| 22,911
|
py
|
Python
|
influxdb_client/service/authorizations_service.py
|
kelseiv/influxdb-client-python
|
9a0d2d659157cca96f6a04818fdeb215d699bdd7
|
[
"MIT"
] | 1
|
2021-06-06T10:39:47.000Z
|
2021-06-06T10:39:47.000Z
|
influxdb_client/service/authorizations_service.py
|
kelseiv/influxdb-client-python
|
9a0d2d659157cca96f6a04818fdeb215d699bdd7
|
[
"MIT"
] | null | null | null |
influxdb_client/service/authorizations_service.py
|
kelseiv/influxdb-client-python
|
9a0d2d659157cca96f6a04818fdeb215d699bdd7
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from influxdb_client.api_client import ApiClient
class AuthorizationsService(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_authorizations_id(self, auth_id, **kwargs): # noqa: E501
"""Delete a authorization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_authorizations_id(auth_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str auth_id: The ID of the authorization to delete. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_authorizations_id_with_http_info(auth_id, **kwargs) # noqa: E501
else:
(data) = self.delete_authorizations_id_with_http_info(auth_id, **kwargs) # noqa: E501
return data
def delete_authorizations_id_with_http_info(self, auth_id, **kwargs): # noqa: E501
"""Delete a authorization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_authorizations_id_with_http_info(auth_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str auth_id: The ID of the authorization to delete. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['auth_id', 'zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_authorizations_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'auth_id' is set
if ('auth_id' not in local_var_params or
local_var_params['auth_id'] is None):
raise ValueError("Missing the required parameter `auth_id` when calling `delete_authorizations_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'auth_id' in local_var_params:
path_params['authID'] = local_var_params['auth_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v2/authorizations/{authID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_authorizations(self, **kwargs): # noqa: E501
"""List all authorizations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_authorizations(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:param str user_id: Only show authorizations that belong to a user ID.
:param str user: Only show authorizations that belong to a user name.
:param str org_id: Only show authorizations that belong to an organization ID.
:param str org: Only show authorizations that belong to a organization name.
:return: Authorizations
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_authorizations_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_authorizations_with_http_info(**kwargs) # noqa: E501
return data
def get_authorizations_with_http_info(self, **kwargs): # noqa: E501
"""List all authorizations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_authorizations_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:param str user_id: Only show authorizations that belong to a user ID.
:param str user: Only show authorizations that belong to a user name.
:param str org_id: Only show authorizations that belong to an organization ID.
:param str org: Only show authorizations that belong to a organization name.
:return: Authorizations
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['zap_trace_span', 'user_id', 'user', 'org_id', 'org'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_authorizations" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'user_id' in local_var_params:
query_params.append(('userID', local_var_params['user_id'])) # noqa: E501
if 'user' in local_var_params:
query_params.append(('user', local_var_params['user'])) # noqa: E501
if 'org_id' in local_var_params:
query_params.append(('orgID', local_var_params['org_id'])) # noqa: E501
if 'org' in local_var_params:
query_params.append(('org', local_var_params['org'])) # noqa: E501
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v2/authorizations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Authorizations', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_authorizations_id(self, auth_id, **kwargs): # noqa: E501
"""Retrieve an authorization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_authorizations_id(auth_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str auth_id: The ID of the authorization to get. (required)
:param str zap_trace_span: OpenTracing span context
:return: Authorization
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_authorizations_id_with_http_info(auth_id, **kwargs) # noqa: E501
else:
(data) = self.get_authorizations_id_with_http_info(auth_id, **kwargs) # noqa: E501
return data
def get_authorizations_id_with_http_info(self, auth_id, **kwargs): # noqa: E501
"""Retrieve an authorization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_authorizations_id_with_http_info(auth_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str auth_id: The ID of the authorization to get. (required)
:param str zap_trace_span: OpenTracing span context
:return: Authorization
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['auth_id', 'zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_authorizations_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'auth_id' is set
if ('auth_id' not in local_var_params or
local_var_params['auth_id'] is None):
raise ValueError("Missing the required parameter `auth_id` when calling `get_authorizations_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'auth_id' in local_var_params:
path_params['authID'] = local_var_params['auth_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v2/authorizations/{authID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Authorization', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_authorizations_id(self, auth_id, authorization_update_request, **kwargs): # noqa: E501
"""Update an authorization to be active or inactive # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_authorizations_id(auth_id, authorization_update_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str auth_id: The ID of the authorization to update. (required)
:param AuthorizationUpdateRequest authorization_update_request: Authorization to update (required)
:param str zap_trace_span: OpenTracing span context
:return: Authorization
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_authorizations_id_with_http_info(auth_id, authorization_update_request, **kwargs) # noqa: E501
else:
(data) = self.patch_authorizations_id_with_http_info(auth_id, authorization_update_request, **kwargs) # noqa: E501
return data
def patch_authorizations_id_with_http_info(self, auth_id, authorization_update_request, **kwargs): # noqa: E501
"""Update an authorization to be active or inactive # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_authorizations_id_with_http_info(auth_id, authorization_update_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str auth_id: The ID of the authorization to update. (required)
:param AuthorizationUpdateRequest authorization_update_request: Authorization to update (required)
:param str zap_trace_span: OpenTracing span context
:return: Authorization
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['auth_id', 'authorization_update_request', 'zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_authorizations_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'auth_id' is set
if ('auth_id' not in local_var_params or
local_var_params['auth_id'] is None):
raise ValueError("Missing the required parameter `auth_id` when calling `patch_authorizations_id`") # noqa: E501
# verify the required parameter 'authorization_update_request' is set
if ('authorization_update_request' not in local_var_params or
local_var_params['authorization_update_request'] is None):
raise ValueError("Missing the required parameter `authorization_update_request` when calling `patch_authorizations_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'auth_id' in local_var_params:
path_params['authID'] = local_var_params['auth_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'authorization_update_request' in local_var_params:
body_params = local_var_params['authorization_update_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v2/authorizations/{authID}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Authorization', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def post_authorizations(self, authorization, **kwargs): # noqa: E501
"""Create an authorization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_authorizations(authorization, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Authorization authorization: Authorization to create (required)
:param str zap_trace_span: OpenTracing span context
:return: Authorization
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_authorizations_with_http_info(authorization, **kwargs) # noqa: E501
else:
(data) = self.post_authorizations_with_http_info(authorization, **kwargs) # noqa: E501
return data
def post_authorizations_with_http_info(self, authorization, **kwargs): # noqa: E501
"""Create an authorization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_authorizations_with_http_info(authorization, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Authorization authorization: Authorization to create (required)
:param str zap_trace_span: OpenTracing span context
:return: Authorization
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['authorization', 'zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_authorizations" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'authorization' is set
if ('authorization' not in local_var_params or
local_var_params['authorization'] is None):
raise ValueError("Missing the required parameter `authorization` when calling `post_authorizations`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'authorization' in local_var_params:
body_params = local_var_params['authorization']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v2/authorizations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Authorization', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| 41.808394
| 146
| 0.637423
| 2,707
| 22,911
| 5.093461
| 0.06317
| 0.051059
| 0.079199
| 0.02611
| 0.934726
| 0.926385
| 0.910212
| 0.902161
| 0.890775
| 0.876632
| 0
| 0.014821
| 0.278469
| 22,911
| 547
| 147
| 41.884826
| 0.819249
| 0.328838
| 0
| 0.75945
| 1
| 0
| 0.188123
| 0.057911
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037801
| false
| 0
| 0.013746
| 0
| 0.106529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0b4592ff0c3e7dd1333b231450c91bdf43e51ab6
| 292
|
py
|
Python
|
mundo 2 (for,if)/back.py
|
Pedroluis1/python
|
d949fa2646c049aa51a41a32dc62de7b14eae90f
|
[
"MIT"
] | null | null | null |
mundo 2 (for,if)/back.py
|
Pedroluis1/python
|
d949fa2646c049aa51a41a32dc62de7b14eae90f
|
[
"MIT"
] | null | null | null |
mundo 2 (for,if)/back.py
|
Pedroluis1/python
|
d949fa2646c049aa51a41a32dc62de7b14eae90f
|
[
"MIT"
] | null | null | null |
valores = []
while True:
valores.append(int(input('Digite um valor: ')))
y = input('deseja continuar? s/n ')
if y != 's'or'S':
break
else:
while True:
valores.append(int(input('Digite um valor: ')))
y = input('deseja continuar? s/n ')
| 24.333333
| 59
| 0.530822
| 38
| 292
| 4.078947
| 0.473684
| 0.116129
| 0.206452
| 0.283871
| 0.851613
| 0.851613
| 0.851613
| 0.851613
| 0.851613
| 0.851613
| 0
| 0
| 0.308219
| 292
| 11
| 60
| 26.545455
| 0.767327
| 0
| 0
| 0.6
| 0
| 0
| 0.274914
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0b4c690bbce33af48d652eea607ac82693e529d9
| 110
|
py
|
Python
|
src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/__init__.py
|
fossabot/OpenPython
|
8fe3f794f2a6c543d96c1ef5c097ffa18f90b680
|
[
"PSF-2.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 126
|
2019-07-19T14:42:41.000Z
|
2022-03-21T22:22:19.000Z
|
src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/__init__.py
|
fossabot/OpenPython
|
8fe3f794f2a6c543d96c1ef5c097ffa18f90b680
|
[
"PSF-2.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 38
|
2019-08-28T01:46:31.000Z
|
2022-03-17T05:46:51.000Z
|
src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/__init__.py
|
fossabot/OpenPython
|
8fe3f794f2a6c543d96c1ef5c097ffa18f90b680
|
[
"PSF-2.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 55
|
2019-08-02T09:32:33.000Z
|
2021-12-22T11:25:51.000Z
|
from umachine import *
from .timer import *
from .pin import *
def unique_id():
return b"upy-non-unique"
| 15.714286
| 28
| 0.7
| 17
| 110
| 4.470588
| 0.705882
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190909
| 110
| 6
| 29
| 18.333333
| 0.853933
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.6
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
0b527806982275e6c8dad0dd244e4d9daa43b95f
| 1,925
|
py
|
Python
|
aydin/it/balancing/test/test_data_histogram_balancer.py
|
royerloic/aydin
|
f9c61a24030891d008c318b250da5faec69fcd7d
|
[
"BSD-3-Clause"
] | 78
|
2021-11-08T16:11:23.000Z
|
2022-03-27T17:51:04.000Z
|
aydin/it/balancing/test/test_data_histogram_balancer.py
|
royerloic/aydin
|
f9c61a24030891d008c318b250da5faec69fcd7d
|
[
"BSD-3-Clause"
] | 19
|
2021-11-08T17:15:40.000Z
|
2022-03-30T17:46:55.000Z
|
aydin/it/balancing/test/test_data_histogram_balancer.py
|
royerloic/aydin
|
f9c61a24030891d008c318b250da5faec69fcd7d
|
[
"BSD-3-Clause"
] | 7
|
2021-11-09T17:42:32.000Z
|
2022-03-09T00:37:57.000Z
|
import numpy
from aydin.io.datasets import camera, normalise
from aydin.it.balancing.data_histogram_balancer import DataHistogramBalancer
def test_no_balancing():
balancer = DataHistogramBalancer(keep_ratio=0.5, balance=False)
image = normalise(camera().astype(numpy.float32, copy=False)).ravel()
balancer.calibrate(image, batch_length=16)
entries = [image[i : i + 16] for i in range(0, image.size, 16)]
balancer.initialise(len(entries))
count_accepted = 0
for entry in entries:
accepted = balancer.add_entry(entry)
if accepted:
count_accepted += 1
print(f"accepted: {count_accepted} / {len(entries)}")
assert (0.5 - count_accepted / len(entries)) < 0.01
def test_balancing():
balancer = DataHistogramBalancer(keep_ratio=0.5, balance=True)
image = normalise(camera().astype(numpy.float32)).ravel()
balancer.calibrate(image, batch_length=16)
entries = [image[i : i + 16] for i in range(0, image.size, 16)]
balancer.initialise(len(entries))
count_accepted = 0
for entry in entries:
accepted = balancer.add_entry(entry)
if accepted:
count_accepted += 1
print(f"accepted: {count_accepted} / {len(entries)}")
assert count_accepted / len(entries) < 0.4
def test_multiple_runs():
balancer = DataHistogramBalancer(keep_ratio=0.5, balance=True)
image = normalise(camera().astype(numpy.float32)).ravel()
balancer.calibrate(image, batch_length=16)
entries = [image[i : i + 16] for i in range(0, image.size, 16)]
for j in range(10):
balancer.initialise(len(entries))
count_accepted = 0
for entry in entries:
accepted = balancer.add_entry(entry)
if accepted:
count_accepted += 1
print(f"accepted: {count_accepted} / {len(entries)}")
assert count_accepted / len(entries) < 0.4
| 24.679487
| 76
| 0.65974
| 245
| 1,925
| 5.069388
| 0.228571
| 0.125604
| 0.101449
| 0.111111
| 0.851047
| 0.831723
| 0.801127
| 0.801127
| 0.748792
| 0.748792
| 0
| 0.033445
| 0.223377
| 1,925
| 77
| 77
| 25
| 0.797324
| 0
| 0
| 0.767442
| 0
| 0
| 0.067013
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 1
| 0.069767
| false
| 0
| 0.069767
| 0
| 0.139535
| 0.069767
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ba713ea5383515e5a4c8120c36affe9e01c6adc
| 44,930
|
py
|
Python
|
core/domain/user_id_migration_test.py
|
davehenton/oppia
|
62a9e9ea8458632e39b8ab4cf15b0489ac1acad9
|
[
"Apache-2.0"
] | 1
|
2021-01-22T03:24:52.000Z
|
2021-01-22T03:24:52.000Z
|
core/domain/user_id_migration_test.py
|
davehenton/oppia
|
62a9e9ea8458632e39b8ab4cf15b0489ac1acad9
|
[
"Apache-2.0"
] | null | null | null |
core/domain/user_id_migration_test.py
|
davehenton/oppia
|
62a9e9ea8458632e39b8ab4cf15b0489ac1acad9
|
[
"Apache-2.0"
] | 1
|
2020-06-25T21:43:01.000Z
|
2020-06-25T21:43:01.000Z
|
# coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user-related one-off computations."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
from constants import constants
from core.domain import user_id_migration
from core.platform import models
from core.tests import test_utils
import feconf
(
activity_models, base_models,
collection_models, exp_models,
feedback_models, question_models, skill_models,
topic_models, user_models) = models.Registry.import_models(
[models.NAMES.activity, models.NAMES.base_model,
models.NAMES.collection, models.NAMES.exploration,
models.NAMES.feedback, models.NAMES.question, models.NAMES.skill,
models.NAMES.topic, models.NAMES.user])
taskqueue_services = models.Registry.import_taskqueue_services()
search_services = models.Registry.import_search_services()
class UserIdMigrationJobTests(test_utils.GenericTestBase):
"""Tests for UserIdMigrationJobTests."""
EXP_ID_1 = 'exp_id_1'
EXP_ID_2 = 'exp_id_2'
USER_A_EMAIL = 'a@example.com'
USER_A_USERNAME = 'a'
USER_B_EMAIL = 'b@example.com'
USER_B_USERNAME = 'b'
USER_C_EMAIL = 'c@example.com'
USER_C_USERNAME = 'c'
USER_D_EMAIL = 'd@example.com'
USER_D_USERNAME = 'd'
def _get_migrated_model_ids(self, job_output):
"""Get successfully migrated model IDs."""
for item in job_output:
if item[0] == 'SUCCESS':
migrated_model_ids = sorted(item[1], key=lambda item: item[0])
migrated_model_ids = [item[1] for item in migrated_model_ids]
return migrated_model_ids
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_id_migration.UserIdMigrationJob.create_new()
user_id_migration.UserIdMigrationJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
stringified_output = (
user_id_migration.UserIdMigrationJob.get_output(job_id))
eval_output = []
for stringified_item in stringified_output:
items = ast.literal_eval(stringified_item)
user_ids = [ast.literal_eval(item) for item in items[1]]
eval_output.append([items[0], user_ids])
return eval_output
def setUp(self):
def empty(*_):
"""Function that takes any number of arguments and does nothing."""
pass
# We don't want to signup the superadmin user.
with self.swap(test_utils.TestBase, 'signup_superadmin_user', empty):
super(UserIdMigrationJobTests, self).setUp()
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
def test_repeated_migration(self):
self._run_one_off_job()
output = self._run_one_off_job()
self.assertIn(['ALREADY DONE', [(self.user_a_id, '')]], output)
def test_one_user_one_model_full_id(self):
original_model = user_models.CompletedActivitiesModel(
id=self.user_a_id,
exploration_ids=['1', '2'],
collection_ids=['1', '2'])
original_model.put()
migrated_model_ids = self._get_migrated_model_ids(
self._run_one_off_job())
migrated_model = (
user_models.CompletedActivitiesModel.get_by_id(
migrated_model_ids[0]))
self.assertNotEqual(
original_model.id, migrated_model.id)
self.assertEqual(
original_model.exploration_ids, migrated_model.exploration_ids)
self.assertEqual(
original_model.collection_ids, migrated_model.collection_ids)
self.assertEqual(
original_model.created_on, migrated_model.created_on)
self.assertEqual(
original_model.last_updated, migrated_model.last_updated)
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(self.user_a_id))
def test_multiple_users_one_model_full_id(self):
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
original_models = []
original_models.append(user_models.CompletedActivitiesModel(
id=self.user_a_id,
exploration_ids=['1', '2'],
collection_ids=['11', '22']))
original_models[-1].put()
original_models.append(user_models.CompletedActivitiesModel(
id=user_b_id,
exploration_ids=['3', '4'],
collection_ids=['33', '44']))
original_models[-1].put()
original_models.append(user_models.CompletedActivitiesModel(
id=user_c_id,
exploration_ids=['5', '6'],
collection_ids=['55', '66']))
original_models[-1].put()
original_models.sort(key=lambda model: model.id)
migrated_model_ids = self._get_migrated_model_ids(
self._run_one_off_job())
for i, model_id in enumerate(migrated_model_ids):
migrated_model = (
user_models.CompletedActivitiesModel.get_by_id(model_id))
self.assertNotEqual(
original_models[i].id, migrated_model.id)
self.assertEqual(
original_models[i].exploration_ids,
migrated_model.exploration_ids)
self.assertEqual(
original_models[i].collection_ids,
migrated_model.collection_ids)
self.assertEqual(
original_models[i].created_on, migrated_model.created_on)
self.assertEqual(
original_models[i].last_updated, migrated_model.last_updated)
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(self.user_a_id))
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(user_b_id))
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(user_c_id))
def test_one_user_one_model_part_id(self):
original_model = user_models.ExpUserLastPlaythroughModel(
id='%s.%s' % (self.user_a_id, 'exp_id'),
user_id=self.user_a_id,
exploration_id='exp_id',
last_played_exp_version=2,
last_played_state_name='start')
original_model.put()
migrated_model_ids = self._get_migrated_model_ids(
self._run_one_off_job())
migrated_model = (
user_models.ExpUserLastPlaythroughModel.get_by_id(
'%s.%s' % (migrated_model_ids[0], 'exp_id')))
self.assertNotEqual(
original_model.id, migrated_model.id)
self.assertNotEqual(
original_model.user_id, migrated_model.user_id)
self.assertEqual(
original_model.exploration_id, migrated_model.exploration_id)
self.assertEqual(
original_model.last_played_exp_version,
migrated_model.last_played_exp_version)
self.assertEqual(
original_model.last_played_state_name,
migrated_model.last_played_state_name)
self.assertEqual(
original_model.created_on, migrated_model.created_on)
self.assertEqual(
original_model.last_updated, migrated_model.last_updated)
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(self.user_a_id))
def test_one_user_different_one_model_part_id(self):
original_model = user_models.UserContributionScoringModel(
id='%s.%s' % ('category', self.user_a_id),
user_id=self.user_a_id,
score_category='category',
score=1.5,
has_email_been_sent=False
)
original_model.put()
migrated_model_ids = self._get_migrated_model_ids(
self._run_one_off_job())
migrated_model = (
user_models.UserContributionScoringModel.get_by_id(
'%s.%s' % ('category', migrated_model_ids[0])))
self.assertNotEqual(
original_model.id, migrated_model.id)
self.assertNotEqual(
original_model.user_id, migrated_model.user_id)
self.assertEqual(
original_model.score_category, migrated_model.score_category)
self.assertEqual(original_model.score, migrated_model.score)
self.assertEqual(
original_model.has_email_been_sent,
migrated_model.has_email_been_sent)
self.assertEqual(original_model.created_on, migrated_model.created_on)
self.assertEqual(
original_model.last_updated, migrated_model.last_updated)
self.assertIsNone(user_models.UserContributionScoringModel.get_by_id(
'%s.%s' % ('category', self.user_a_id)))
def test_multiple_users_one_model_part_id(self):
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
original_models = []
original_models.append(user_models.ExpUserLastPlaythroughModel(
id='%s.%s' % (self.user_a_id, 'exp_id'),
user_id=self.user_a_id,
exploration_id='exp_id',
last_played_exp_version=2,
last_played_state_name='start'))
original_models[-1].put()
original_models.append(user_models.ExpUserLastPlaythroughModel(
id='%s.%s' % (user_b_id, 'exp_id'),
user_id=user_b_id,
exploration_id='exp_id',
last_played_exp_version=3,
last_played_state_name='start'))
original_models[-1].put()
original_models.append(user_models.ExpUserLastPlaythroughModel(
id='%s.%s' % (user_c_id, 'exp_id'),
user_id=user_c_id,
exploration_id='exp_id',
last_played_exp_version=4,
last_played_state_name='start'))
original_models[-1].put()
original_models.sort(key=lambda model: model.user_id)
migrated_model_ids = self._get_migrated_model_ids(
self._run_one_off_job())
for i, model_id in enumerate(migrated_model_ids):
migrated_model = (
user_models.ExpUserLastPlaythroughModel.get_by_id(
'%s.%s' % (model_id, 'exp_id')))
self.assertNotEqual(
original_models[i].id, migrated_model.id)
self.assertNotEqual(
original_models[i].user_id, migrated_model.user_id)
self.assertEqual(
original_models[i].exploration_id,
migrated_model.exploration_id)
self.assertEqual(
original_models[i].last_played_exp_version,
migrated_model.last_played_exp_version)
self.assertEqual(
original_models[i].last_played_state_name,
migrated_model.last_played_state_name)
self.assertEqual(
original_models[i].created_on, migrated_model.created_on)
self.assertEqual(
original_models[i].last_updated, migrated_model.last_updated)
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(self.user_a_id))
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(user_b_id))
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(user_c_id))
def test_multiple_users_different_one_model_part_id(self):
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
original_models = []
original_models.append(user_models.UserContributionScoringModel(
id='%s.%s' % ('score_category', self.user_a_id),
user_id=self.user_a_id,
score_category='score_category',
score=2,
has_email_been_sent=False))
original_models[-1].put()
original_models.append(user_models.UserContributionScoringModel(
id='%s.%s' % ('score_category', user_b_id),
user_id=user_b_id,
score_category='score_category',
score=2,
has_email_been_sent=False))
original_models[-1].put()
original_models.append(user_models.UserContributionScoringModel(
id='%s.%s' % ('score_category', user_c_id),
user_id=user_c_id,
score_category='score_category',
score=2,
has_email_been_sent=False))
original_models[-1].put()
original_models.sort(key=lambda model: model.user_id)
migrated_model_ids = self._get_migrated_model_ids(
self._run_one_off_job())
for i, model_id in enumerate(migrated_model_ids):
migrated_model = (
user_models.UserContributionScoringModel.get_by_id(
'%s.%s' % ('score_category', model_id)))
self.assertNotEqual(
original_models[i].id, migrated_model.id)
self.assertNotEqual(
original_models[i].user_id, migrated_model.user_id)
self.assertEqual(
original_models[i].score_category,
migrated_model.score_category)
self.assertEqual(original_models[i].score, migrated_model.score)
self.assertEqual(
original_models[i].has_email_been_sent,
migrated_model.has_email_been_sent)
self.assertEqual(
original_models[i].created_on, migrated_model.created_on)
self.assertEqual(
original_models[i].last_updated, migrated_model.last_updated)
def test_one_user_one_model_user_id_field(self):
original_model = exp_models.ExplorationSnapshotMetadataModel(
id='instance_id',
committer_id=self.user_a_id,
commit_type='create',
commit_message='commit message 2',
commit_cmds=[{'cmd': 'some_command'}])
original_model.put()
migrated_model_ids = self._get_migrated_model_ids(
self._run_one_off_job())
migrated_model = (
exp_models.ExplorationSnapshotMetadataModel.query(
exp_models.ExplorationSnapshotMetadataModel.committer_id ==
migrated_model_ids[0]
).get())
self.assertNotEqual(
original_model.committer_id, migrated_model.committer_id)
self.assertEqual(original_model.id, migrated_model.id)
self.assertEqual(
original_model.commit_type, migrated_model.commit_type)
self.assertEqual(
original_model.commit_message, migrated_model.commit_message)
self.assertEqual(
original_model.commit_cmds, migrated_model.commit_cmds)
self.assertEqual(
original_model.created_on, migrated_model.created_on)
self.assertEqual(
original_model.last_updated, migrated_model.last_updated)
def test_multiple_users_one_model_user_id_field(self):
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
original_models = []
original_models.append(exp_models.ExplorationSnapshotMetadataModel(
id='instance_id1',
committer_id=self.user_a_id,
commit_type='create',
commit_message='commit message 2',
commit_cmds=[{'cmd': 'some_command'}]))
original_models[-1].put()
original_models.append(exp_models.ExplorationSnapshotMetadataModel(
id='instance_id2',
committer_id=user_b_id,
commit_type='create',
commit_message='commit message 2',
commit_cmds=[{'cmd': 'some_command'}]))
original_models[-1].put()
original_models.append(exp_models.ExplorationSnapshotMetadataModel(
id='instance_id3',
committer_id=user_c_id,
commit_type='create',
commit_message='commit message 2',
commit_cmds=[{'cmd': 'some_command'}]))
original_models[-1].put()
original_models.sort(key=lambda model: model.committer_id)
migrated_model_ids = self._get_migrated_model_ids(
self._run_one_off_job())
for i, model_id in enumerate(migrated_model_ids):
migrated_model = (
exp_models.ExplorationSnapshotMetadataModel.query(
exp_models.ExplorationSnapshotMetadataModel.committer_id ==
model_id
).get())
self.assertNotEqual(
original_models[i].committer_id, migrated_model.committer_id)
self.assertEqual(original_models[i].id, migrated_model.id)
self.assertEqual(
original_models[i].commit_type, migrated_model.commit_type)
self.assertEqual(
original_models[i].commit_message,
migrated_model.commit_message)
self.assertEqual(
original_models[i].commit_cmds, migrated_model.commit_cmds)
self.assertEqual(
original_models[i].created_on, migrated_model.created_on)
self.assertEqual(
original_models[i].last_updated, migrated_model.last_updated)
class SnapshotsUserIdMigrationJobTests(test_utils.GenericTestBase):
"""Tests for SnapshotsUserIdMigrationJobTests."""
SNAPSHOT_ID = '2'
USER_1_USER_ID = 'user_id_1'
USER_1_GAE_ID = 'gae_id_1'
USER_2_USER_ID = 'user_id_2'
USER_2_GAE_ID = 'gae_id_2'
USER_3_USER_ID = 'user_id_3'
USER_3_GAE_ID = 'gae_id_3'
WRONG_GAE_ID = 'wrong_id'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_id_migration.SnapshotsUserIdMigrationJob.create_new()
user_id_migration.SnapshotsUserIdMigrationJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
stringified_output = (
user_id_migration.SnapshotsUserIdMigrationJob.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return eval_output
def setUp(self):
def empty(*_):
"""Function that takes any number of arguments and does nothing."""
pass
# We don't want to signup the superadmin user.
with self.swap(test_utils.TestBase, 'signup_superadmin_user', empty):
super(SnapshotsUserIdMigrationJobTests, self).setUp()
user_models.UserSettingsModel(
id=self.USER_1_USER_ID,
gae_id=self.USER_1_GAE_ID,
email='some@email.com',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
user_models.UserSettingsModel(
id=self.USER_2_USER_ID,
gae_id=self.USER_2_GAE_ID,
email='some.different@email.com',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
user_models.UserSettingsModel(
id=self.USER_3_USER_ID,
gae_id=self.USER_3_GAE_ID,
email='some.different@email.cz',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
def test_migrate_collection_rights_snapshot_model(self):
original_rights_model = collection_models.CollectionRightsModel(
id=self.SNAPSHOT_ID,
owner_ids=[self.USER_1_GAE_ID, self.USER_2_GAE_ID],
editor_ids=[self.USER_1_GAE_ID, feconf.SYSTEM_COMMITTER_ID],
voice_artist_ids=[self.USER_1_GAE_ID, self.USER_2_GAE_ID],
viewer_ids=[self.USER_1_GAE_ID, self.USER_3_GAE_ID],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0
)
original_rights_snapshot_model = (
collection_models.CollectionRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertEqual(
output[0], [u'SUCCESS - CollectionRightsSnapshotContentModel', 1])
migrated_rights_snapshot_model = (
collection_models.CollectionRightsSnapshotContentModel.get_by_id(
self.SNAPSHOT_ID))
self.assertEqual(
original_rights_snapshot_model.last_updated,
migrated_rights_snapshot_model.last_updated)
migrated_rights_model = collection_models.CollectionRightsModel(
**migrated_rights_snapshot_model.content)
self.assertEqual(
[self.USER_1_USER_ID, self.USER_2_USER_ID],
migrated_rights_model.owner_ids)
self.assertEqual(
[self.USER_1_USER_ID, feconf.SYSTEM_COMMITTER_ID],
migrated_rights_model.editor_ids)
self.assertEqual(
[self.USER_1_USER_ID, self.USER_2_USER_ID],
migrated_rights_model.voice_artist_ids)
self.assertEqual(
[self.USER_1_USER_ID, self.USER_3_USER_ID],
migrated_rights_model.viewer_ids)
def test_migrate_collection_rights_snapshot_model_wrong_id(self):
original_rights_model = collection_models.CollectionRightsModel(
id=self.SNAPSHOT_ID,
owner_ids=[self.WRONG_GAE_ID],
editor_ids=[self.WRONG_GAE_ID],
voice_artist_ids=[self.WRONG_GAE_ID],
viewer_ids=[self.WRONG_GAE_ID],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0
)
original_rights_snapshot_model = (
collection_models.CollectionRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertIn(
['FAILURE - CollectionRightsSnapshotContentModel',
[self.WRONG_GAE_ID]],
output)
def test_migrate_exp_rights_snapshot_model(self):
original_rights_model = exp_models.ExplorationRightsModel(
id=self.SNAPSHOT_ID,
owner_ids=[self.USER_1_GAE_ID, self.USER_2_GAE_ID],
editor_ids=[self.USER_1_GAE_ID, feconf.SYSTEM_COMMITTER_ID],
voice_artist_ids=[self.USER_1_GAE_ID, self.USER_2_GAE_ID],
viewer_ids=[self.USER_1_GAE_ID, self.USER_3_GAE_ID],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0)
original_rights_snapshot_model = (
exp_models.ExplorationRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertEqual(
output[0],
[u'SUCCESS - ExplorationRightsSnapshotContentModel', 1])
migrated_rights_snapshot_model = (
exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
self.SNAPSHOT_ID))
self.assertEqual(
original_rights_snapshot_model.last_updated,
migrated_rights_snapshot_model.last_updated)
migrated_rights_model = exp_models.ExplorationRightsModel(
**migrated_rights_snapshot_model.content)
self.assertEqual(
[self.USER_1_USER_ID, self.USER_2_USER_ID],
migrated_rights_model.owner_ids)
self.assertEqual(
[self.USER_1_USER_ID, feconf.SYSTEM_COMMITTER_ID],
migrated_rights_model.editor_ids)
self.assertEqual(
[self.USER_1_USER_ID, self.USER_2_USER_ID],
migrated_rights_model.voice_artist_ids)
self.assertEqual(
[self.USER_1_USER_ID, self.USER_3_USER_ID],
migrated_rights_model.viewer_ids)
def test_migrate_exp_rights_snapshot_model_wrong_id(self):
original_rights_model = exp_models.ExplorationRightsModel(
id=self.SNAPSHOT_ID,
owner_ids=[self.WRONG_GAE_ID],
editor_ids=[self.WRONG_GAE_ID],
voice_artist_ids=[self.WRONG_GAE_ID],
viewer_ids=[self.WRONG_GAE_ID],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0
)
original_rights_snapshot_model = (
exp_models.ExplorationRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertIn(
['FAILURE - ExplorationRightsSnapshotContentModel',
[self.WRONG_GAE_ID]],
output)
def test_migrate_exp_rights_snapshot_model_wrong_field(self):
original_rights_model = exp_models.ExplorationRightsModel(
id=self.SNAPSHOT_ID,
owner_ids=[self.USER_1_GAE_ID, self.USER_2_GAE_ID],
editor_ids=[self.USER_1_GAE_ID, feconf.SYSTEM_COMMITTER_ID],
voice_artist_ids=[self.USER_1_GAE_ID, self.USER_2_GAE_ID],
viewer_ids=[self.USER_1_GAE_ID, self.USER_3_GAE_ID],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0)
original_rights_snapshot_model = (
exp_models.ExplorationRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.content['all_viewer_ids'] = ['id1']
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertEqual(
output[0],
[u'SUCCESS - ExplorationRightsSnapshotContentModel', 1])
migrated_rights_snapshot_model = (
exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
self.SNAPSHOT_ID))
self.assertEqual(
original_rights_snapshot_model.last_updated,
migrated_rights_snapshot_model.last_updated)
self.assertNotIn(
'all_viewer_ids', migrated_rights_snapshot_model.content)
migrated_rights_model = exp_models.ExplorationRightsModel(
**migrated_rights_snapshot_model.content)
self.assertEqual(
[self.USER_1_USER_ID, self.USER_2_USER_ID],
migrated_rights_model.owner_ids)
self.assertEqual(
[self.USER_1_USER_ID, feconf.SYSTEM_COMMITTER_ID],
migrated_rights_model.editor_ids)
self.assertEqual(
[self.USER_1_USER_ID, self.USER_2_USER_ID],
migrated_rights_model.voice_artist_ids)
self.assertEqual(
[self.USER_1_USER_ID, self.USER_3_USER_ID],
migrated_rights_model.viewer_ids)
def test_migrate_question_rights_snapshot_model(self):
original_rights_model = question_models.QuestionRightsModel(
id=self.SNAPSHOT_ID,
creator_id=self.USER_1_GAE_ID)
original_rights_snapshot_model = (
question_models.QuestionRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertEqual(
output[0], [u'SUCCESS - QuestionRightsSnapshotContentModel', 1])
migrated_rights_snapshot_model = (
question_models.QuestionRightsSnapshotContentModel.get_by_id(
self.SNAPSHOT_ID))
self.assertEqual(
original_rights_snapshot_model.last_updated,
migrated_rights_snapshot_model.last_updated)
migrated_rights_model = question_models.QuestionRightsModel(
**migrated_rights_snapshot_model.content)
self.assertEqual(self.USER_1_USER_ID, migrated_rights_model.creator_id)
def test_migrate_question_rights_snapshot_model_wrong_id(self):
original_rights_model = question_models.QuestionRightsModel(
id=self.SNAPSHOT_ID,
creator_id=self.WRONG_GAE_ID)
original_rights_snapshot_model = (
question_models.QuestionRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertIn(
['FAILURE - QuestionRightsSnapshotContentModel',
[self.WRONG_GAE_ID]],
output)
def test_migrate_skill_rights_snapshot_model(self):
original_rights_model = skill_models.SkillRightsModel(
id=self.SNAPSHOT_ID,
creator_id=self.USER_1_GAE_ID)
original_rights_snapshot_model = (
skill_models.SkillRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertEqual(
output[0], [u'SUCCESS - SkillRightsSnapshotContentModel', 1])
migrated_rights_snapshot_model = (
skill_models.SkillRightsSnapshotContentModel.get_by_id(
self.SNAPSHOT_ID))
self.assertEqual(
original_rights_snapshot_model.last_updated,
migrated_rights_snapshot_model.last_updated)
migrated_rights_model = skill_models.SkillRightsModel(
**migrated_rights_snapshot_model.content)
self.assertEqual(self.USER_1_USER_ID, migrated_rights_model.creator_id)
def test_migrate_skill_rights_snapshot_model_wrong_id(self):
original_rights_model = skill_models.SkillRightsModel(
id=self.SNAPSHOT_ID,
creator_id=self.WRONG_GAE_ID)
original_rights_snapshot_model = (
skill_models.SkillRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertIn(
['FAILURE - SkillRightsSnapshotContentModel',
[self.WRONG_GAE_ID]],
output)
def test_migrate_topic_rights_snapshot_model(self):
original_rights_model = topic_models.TopicRightsModel(
manager_ids=[self.USER_1_GAE_ID, self.USER_2_GAE_ID,
feconf.SYSTEM_COMMITTER_ID])
original_rights_snapshot_model = (
topic_models.TopicRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertEqual(
output[0], [u'SUCCESS - TopicRightsSnapshotContentModel', 1])
migrated_rights_snapshot_model = (
topic_models.TopicRightsSnapshotContentModel.get_by_id(
self.SNAPSHOT_ID))
self.assertEqual(
original_rights_snapshot_model.last_updated,
migrated_rights_snapshot_model.last_updated)
migrated_rights_model = topic_models.TopicRightsModel(
**migrated_rights_snapshot_model.content)
self.assertEqual(
[self.USER_1_USER_ID, self.USER_2_USER_ID,
feconf.SYSTEM_COMMITTER_ID],
migrated_rights_model.manager_ids)
def test_migrate_topic_rights_snapshot_model_wrong_id(self):
original_rights_model = topic_models.TopicRightsModel(
manager_ids=[self.WRONG_GAE_ID])
original_rights_snapshot_model = (
topic_models.TopicRightsSnapshotContentModel(
id=self.SNAPSHOT_ID,
content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
output = self._run_one_off_job()
self.assertIn(
['FAILURE - TopicRightsSnapshotContentModel',
[self.WRONG_GAE_ID]],
output)
class GaeIdNotInModelsVerificationJobTests(test_utils.GenericTestBase):
"""Tests for GaeIdNotInModelsVerificationJob."""
USER_1_USER_ID = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
USER_1_GAE_ID = 'gae_id_1'
USER_2_USER_ID = 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'
USER_2_GAE_ID = 'gae_id_2'
USER_3_USER_ID = 'cccccccccccccccccccccccccccccccc'
USER_3_GAE_ID = 'gae_id_3'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_id_migration.GaeIdNotInModelsVerificationJob.create_new()
user_id_migration.GaeIdNotInModelsVerificationJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
stringified_output = (
user_id_migration.GaeIdNotInModelsVerificationJob.get_output(
job_id))
eval_output = []
for stringified_item in stringified_output:
item = ast.literal_eval(stringified_item)
item[1] = [ast.literal_eval(ids) for ids in item[1]]
eval_output.append(item)
return eval_output
def setUp(self):
def empty(*_):
"""Function that takes any number of arguments and does nothing."""
pass
# We don't want to signup the superadmin user.
with self.swap(test_utils.TestBase, 'signup_superadmin_user', empty):
super(GaeIdNotInModelsVerificationJobTests, self).setUp()
user_models.UserSettingsModel(
id=self.USER_1_USER_ID,
gae_id=self.USER_1_GAE_ID,
email='some@email.com',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
user_models.UserSettingsModel(
id=self.USER_2_USER_ID,
gae_id=self.USER_2_GAE_ID,
email='some.different@email.com',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
user_models.UserSettingsModel(
id=self.USER_3_USER_ID,
gae_id=self.USER_3_GAE_ID,
email='some.different@email.cz',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
def test_wrong_user_ids(self):
user_models.UserSettingsModel(
id='aa',
gae_id=self.USER_1_GAE_ID,
email='some@email.com',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
user_models.UserSettingsModel(
id='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
gae_id=self.USER_2_GAE_ID,
email='some.different@email.com',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
output = self._run_one_off_job()
output = [
key[1] for key in output if key[0] == 'FAILURE - WRONG ID FORMAT'
][0]
self.assertEqual(len(output), 2)
self.assertIn(('gae_id_1', 'aa'), output)
self.assertIn(('gae_id_2', 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'), output)
def test_failure(self):
user_models.CompletedActivitiesModel(
id=self.USER_1_GAE_ID,
exploration_ids=['1', '2'],
collection_ids=['1', '2']
).put()
user_models.CompletedActivitiesModel(
id=self.USER_2_GAE_ID,
exploration_ids=['1', '2'],
collection_ids=['1', '2']
).put()
user_models.ExpUserLastPlaythroughModel(
id='%s.%s' % (self.USER_3_GAE_ID, 'exp_id'),
user_id=self.USER_3_GAE_ID,
exploration_id='exp_id',
last_played_exp_version=2,
last_played_state_name='start'
).put()
original_rights_model = skill_models.SkillRightsModel(
id='1', creator_id=self.USER_1_GAE_ID)
original_rights_snapshot_model = (
skill_models.SkillRightsSnapshotContentModel(
id='1', content=original_rights_model.to_dict()))
original_rights_snapshot_model.put()
# Model with DELETION_POLICY equal to NOT_APPLICABLE.
activity_models.ActivityReferencesModel(id='some_id').put()
output = self._run_one_off_job()
self.assertNotIn('SUCCESS', [key[0] for key in output])
output = [
key[1] for key in output
if key[0] == 'FAILURE - HAS REFERENCE TO GAE ID'][0]
self.assertEqual(len(output), 4)
self.assertIn((self.USER_1_GAE_ID, 'SkillRightsModel'), output)
self.assertIn((self.USER_1_GAE_ID, 'CompletedActivitiesModel'), output)
self.assertIn((self.USER_2_GAE_ID, 'CompletedActivitiesModel'), output)
self.assertIn(
(self.USER_3_GAE_ID, 'ExpUserLastPlaythroughModel'), output)
def test_success(self):
output = self._run_one_off_job()
output = [key[1] for key in output if key[0] == 'SUCCESS'][0]
self.assertEqual(len(output), 3)
self.assertIn((self.USER_1_GAE_ID, self.USER_1_USER_ID), output)
self.assertIn((self.USER_2_GAE_ID, self.USER_2_USER_ID), output)
self.assertIn((self.USER_3_GAE_ID, self.USER_3_USER_ID), output)
class ModelsUserIdsHaveUserSettingsVerificationJobTests(
test_utils.GenericTestBase):
"""Tests for ModelsUserIdsHaveUserSettingsVerificationJob."""
USER_1_USER_ID = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
USER_1_GAE_ID = 'gae_id_1'
USER_2_USER_ID = 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'
USER_2_GAE_ID = 'gae_id_2'
USER_3_USER_ID = 'cccccccccccccccccccccccccccccccc'
USER_3_GAE_ID = 'gae_id_3'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_id_migration.ModelsUserIdsHaveUserSettingsVerificationJob
.create_new())
(user_id_migration.ModelsUserIdsHaveUserSettingsVerificationJob
.enqueue(job_id))
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
stringified_output = (
user_id_migration.ModelsUserIdsHaveUserSettingsVerificationJob
.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return eval_output
def setUp(self):
def empty(*_):
"""Function that takes any number of arguments and does nothing."""
pass
# We don't want to signup the superadmin user.
with self.swap(test_utils.TestBase, 'signup_superadmin_user', empty):
super(
ModelsUserIdsHaveUserSettingsVerificationJobTests, self).setUp()
user_models.UserSettingsModel(
id=self.USER_1_USER_ID,
gae_id=self.USER_1_GAE_ID,
email='some@email.com',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
user_models.UserSettingsModel(
id=self.USER_2_USER_ID,
gae_id=self.USER_2_GAE_ID,
email='some.different@email.com',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
user_models.UserSettingsModel(
id=self.USER_3_USER_ID,
gae_id=self.USER_3_GAE_ID,
email='some.different@email.cz',
role=feconf.ROLE_ID_COLLECTION_EDITOR
).put()
def test_one_user_one_model_full_id(self):
user_models.CompletedActivitiesModel(
id=self.USER_1_GAE_ID,
exploration_ids=['1', '2'],
collection_ids=['1', '2']).put()
user_models.CompletedActivitiesModel(
id=feconf.SYSTEM_COMMITTER_ID,
exploration_ids=['1', '2'],
collection_ids=['1', '2']).put()
user_models.ExpUserLastPlaythroughModel(
id='%s.%s' % (self.USER_2_GAE_ID, 'exp_id'),
user_id=self.USER_2_GAE_ID,
exploration_id='exp_id',
last_played_exp_version=2,
last_played_state_name='start').put()
user_models.ExpUserLastPlaythroughModel(
id='%s.%s' % (feconf.SYSTEM_COMMITTER_ID, 'exp_id'),
user_id=feconf.SYSTEM_COMMITTER_ID,
exploration_id='exp_id',
last_played_exp_version=2,
last_played_state_name='start').put()
user_models.UserContributionScoringModel(
id='%s.%s' % ('category', self.USER_2_GAE_ID),
user_id=self.USER_2_USER_ID,
score_category='category',
score=1.5,
has_email_been_sent=False).put()
exp_models.ExplorationSnapshotMetadataModel(
id='exp_1_id',
committer_id=self.USER_2_GAE_ID,
commit_type='create',
commit_message='commit message 2',
commit_cmds=[{'cmd': 'some_command'}]).put()
exp_models.ExplorationSnapshotMetadataModel(
id='exp_2_id',
committer_id=feconf.SYSTEM_COMMITTER_ID,
commit_type='create',
commit_message='commit message 2',
commit_cmds=[{'cmd': 'some_command'}]).put()
feedback_models.GeneralFeedbackThreadModel(
id='type.id.generated',
entity_type='type',
entity_id='id',
subject='subject').put()
feedback_models.GeneralFeedbackThreadUserModel(
id='None.thread_id',
thread_id='thread_id').put()
topic_models.TopicRightsModel.put_multi([
topic_models.TopicRightsModel(
id='topic_1_id',
manager_ids=[self.USER_1_GAE_ID])])
topic_models.TopicRightsModel.put_multi([
topic_models.TopicRightsModel(
id='topic_2_id',
manager_ids=[feconf.SYSTEM_COMMITTER_ID])])
output = self._run_one_off_job()
self.assertIn(
['FAILURE - CompletedActivitiesModel', [self.USER_1_GAE_ID]],
output)
self.assertIn(
['SUCCESS - CompletedActivitiesModel', 1], output)
self.assertIn(
['FAILURE - ExpUserLastPlaythroughModel',
['%s.%s' % (self.USER_2_GAE_ID, 'exp_id')]],
output)
self.assertIn(
['SUCCESS - ExpUserLastPlaythroughModel', 1],
output)
self.assertIn(
['FAILURE - UserContributionScoringModel',
['%s.%s' % ('category', self.USER_2_GAE_ID)]],
output)
self.assertIn(
['FAILURE - ExplorationSnapshotMetadataModel', ['exp_1_id']],
output)
self.assertIn(
['SUCCESS - ExplorationSnapshotMetadataModel', 1], output)
self.assertIn(
['SUCCESS_NONE - GeneralFeedbackThreadModel', 1], output)
self.assertIn(
['SUCCESS_NONE - GeneralFeedbackThreadUserModel', 1], output)
self.assertIn(
['FAILURE - TopicRightsModel', ['topic_1_id']], output)
self.assertIn(['SUCCESS - TopicRightsModel', 1], output)
self.assertIn(['SUCCESS - UserSettingsModel', 3], output)
| 41.912313
| 80
| 0.65059
| 5,112
| 44,930
| 5.297731
| 0.058294
| 0.032789
| 0.02437
| 0.030906
| 0.849937
| 0.807991
| 0.778598
| 0.760136
| 0.740824
| 0.686434
| 0
| 0.008478
| 0.262297
| 44,930
| 1,071
| 81
| 41.951447
| 0.808599
| 0.034097
| 0
| 0.714133
| 0
| 0
| 0.06237
| 0.027918
| 0
| 0
| 0
| 0
| 0.132762
| 1
| 0.039615
| false
| 0.004283
| 0.011777
| 0
| 0.093148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0badc90d6d563729b3350b06537f60efc8553a1e
| 190
|
py
|
Python
|
fun/views.py
|
guettli/funql
|
723a09b35deb9654578db381d313c6cd899510fd
|
[
"MIT"
] | null | null | null |
fun/views.py
|
guettli/funql
|
723a09b35deb9654578db381d313c6cd899510fd
|
[
"MIT"
] | null | null | null |
fun/views.py
|
guettli/funql
|
723a09b35deb9654578db381d313c6cd899510fd
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def accounts_profile(request):
return render(request, 'fun/accounts_profile.html')
def start(request):
return render(request, 'fun/start.html')
| 23.75
| 55
| 0.763158
| 25
| 190
| 5.72
| 0.52
| 0.20979
| 0.265734
| 0.363636
| 0.405594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126316
| 190
| 7
| 56
| 27.142857
| 0.861446
| 0
| 0
| 0
| 0
| 0
| 0.205263
| 0.131579
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
e7ece19d71d111d828dd339ccb2089662abde3d1
| 7,670
|
py
|
Python
|
controllers/turistas salientes aena_controller.py
|
SergioCMDev/Busines-Inteligence-applied-to-tourism
|
61834a46fce22453e94b7bbdf8d4ecdcf128285a
|
[
"Apache-2.0"
] | null | null | null |
controllers/turistas salientes aena_controller.py
|
SergioCMDev/Busines-Inteligence-applied-to-tourism
|
61834a46fce22453e94b7bbdf8d4ecdcf128285a
|
[
"Apache-2.0"
] | null | null | null |
controllers/turistas salientes aena_controller.py
|
SergioCMDev/Busines-Inteligence-applied-to-tourism
|
61834a46fce22453e94b7bbdf8d4ecdcf128285a
|
[
"Apache-2.0"
] | null | null | null |
from ..DB.Repositorio_Turistas_Salientes_Aena import RepositoryTuristasSalientesAena as DBRepository
from ..Utilidades.Conversores import Conversores as Conversor
def obtener_cantidad_anio(PaisOrigen, Anio): #OK
"""
Obtiene la cantidad de personas que salen de un pais en un años y devuelve la cantidad
Obtiene la cantidad de personas que salen de un pais durante un rango de años y lo organizamos mensualmente
:param PaisOrigen: Pais del que salen los turistas
:type PaisOrigen: str
:param Anio: Anio
:type Anio: int
:rtype: None
"""
conversor = Conversor()
repository = DBRepository()
cursor, labels = repository.ObtenerDatosTuristasAenaEnUnAnioDadoPaisDestinoAnio(PaisOrigen, Anio)
arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
##Mostrar JSON Extendido
matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels, Anio, Anio)
retval = conversor.ObtenerDataJSONExtendido(matriz)
return retval
##Mostrar JSON Reducido
# retval = conversor.convertirAJson(arrayTuplas)
# return retval
def obtener_cantidad_ciudad_destino_en_anio(PaisOrigen, CiudadOrigen, Anio): #OK
"""
Obtiene la cantidad de personas que van hacia una ciudad durante un rango de años
Obtiene la cantidad de personas que van hacia una ciudad durante un rango de años
:param PaisDestino: Pais
:type PaisDestino: str
:param CiudadDestino: Ciudad
:type CiudadDestino: str
:param AnioInicio: Anio Inicio
:type AnioInicio: int
:param AnioFin: Anio Fin
:type AnioFin: int
:rtype: None
"""
conversor = Conversor()
repository = DBRepository()
cursor, labels = repository.ObtenerDatosTuristasAenaDadoPaisDestinoCiudadDestinoAnioMinMax(PaisOrigen, CiudadOrigen, Anio, Anio )
arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
##Mostrar JSON Extendido
matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels, Anio, Anio)
retval = conversor.ObtenerDataJSONExtendido(matriz)
return retval
##Mostrar JSON Reducido
# retval = conversor.convertirAJson(arrayTuplas)
# return retvaltval
def obtener_cantidad_ciudad_en_anio(PaisOrigen, CiudadOrigen, Anio): #OK
"""
Obtiene la cantidad de personas que salen de un pais en un año y devuelve la cantidad
Obtiene la cantidad de personas que salen de un pais durante un rango de años y lo organizamos mensualmente
:param PaisOrigen: Pais del que salen los turistas
:type PaisOrigen: str
:param CiudadOrigen: Ciudad de la que salen los turistas
:type CiudadOrigen: str
:param Anio: Anio
:type Anio: int
:rtype: None
"""
conversor = Conversor()
repository = DBRepository()
cursor, labels = repository.ObtenerDatosTuristasAenaEnUnAnioDadoPaisDestinoCiudadDestinoAnio(PaisOrigen, CiudadOrigen, Anio)
arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
##Mostrar JSON Extendido
matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels, Anio, Anio)
retval = conversor.ObtenerDataJSONExtendido(matriz)
return retval
##Mostrar JSON Reducido
# retval = conversor.convertirAJson(arrayTuplas)
# return retval
def obtener_cantidad_ciudad_en_mes_en_anio(PaisOrigen, CiudadOrigen, Mes, Anio): #OK
"""
Obtiene la cantidad de personas que salen de una ciudad de un pais en un año durante un mismo mes y devuelve las cantidades
Obtiene la cantidad de personas que salen de una ciudad de un pais en un año durante un mismo mes y devuelve las cantidades
:param PaisOrigen: Pais del que salen los turistas
:type PaisOrigen: str
:param CiudadOrigen: Ciudad de la que salen los turistas
:type CiudadOrigen: str
:param Anio: Anio
:type Anio: int
:rtype: None
"""
conversor = Conversor()
repository = DBRepository()
cursor, labels = repository.ObtenerNumeroTuristasAenaDadoPaisOrigenCiudadOrigenMesAnio(PaisOrigen, CiudadOrigen, Mes, Anio)
arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
##Mostrar JSON Extendido
# matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels, Anio, Anio)
# retval = conversor.ObtenerDataJSONExtendido(matriz)
# return retval
##Mostrar JSON Reducido
retval = conversor.convertirAJson(arrayTuplas)
return retval
def obtener_cantidad_ciudad_en_mes_en_rango_anios(PaisOrigen, CiudadOrigen, Mes, AnioInicio, AnioFin): #OK
"""
Obtiene la cantidad de personas que salen de una ciudad de un pais en un rango de años durante un mismo mes y devuelve las cantidades
Obtiene la cantidad de personas que salen de una ciudad de un pais en un rango de años durante un mismo mes y devuelve las cantidades
:param PaisOrigen: Pais del que salen los turistas
:type PaisOrigen: str
:param CiudadOrigen: Ciudad de la que salen los turistas
:type CiudadOrigen: str
:param Anio: Anio
:type Anio: int
:rtype: None
"""
conversor = Conversor()
repository = DBRepository()
cursor, labels = repository.ObtenerDatosTuristasAenaDadoPaisCiudadMesAnioMinMax(PaisOrigen, CiudadOrigen, Mes, AnioInicio, AnioFin)
arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
##Mostrar JSON Extendido
matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
retval = conversor.ObtenerDataJSONExtendido(matriz)
return retval
##Mostrar JSON Reducido
# retval = conversor.convertirAJson(arrayTuplas)
# return retval
def obtener_cantidad_ciudad_mensualmente_en_anio(PaisOrigen, AnioInicio, AnioFin): #OK
"""
Obtiene la cantidad de personas que salen de un pais durante un rango de años y lo organiza mensualmente
Obtiene la cantidad de personas que salen de un pais durante un rango de años y lo organizas mensualmente
:param PaisOrigen: Pais del que salen los turistas
:type PaisOrigen: str
:param AnioInicio: Anio Inicio
:type AnioInicio: int
:param AnioFin: Anio Fin
:type AnioFin: int
:rtype: None
"""
conversor = Conversor()
repository = DBRepository()
cursor, labels = repository.ObtenerDatosTuristasMensualmenteAenaDadoPaisDestinoCiudadAnio(PaisOrigen, AnioInicio, AnioFin)
arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
##Mostrar JSON Extendido
matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
retval = conversor.ObtenerDataJSONExtendido(matriz)
return retval
##Mostrar JSON Reducido
# retval = conversor.convertirAJson(arrayTuplas)
# return retval
def obtener_cantidad_salientes_rango_anios(PaisOrigen, AnioInicio, AnioFin): #OK
"""
Obtiene la cantidad de personas que salen de un pais durante un rango de años y lo organiza anualmente
Obtiene la cantidad de personas que salen de un pais durante un rango de años y lo organizas mensualmente
:param PaisOrigen: Pais del que salen los turistas
:type PaisOrigen: str
:param AnioInicio: Anio Inicio
:type AnioInicio: int
:param AnioFin: Anio Fin
:type AnioFin: int
:rtype: None
"""
conversor = Conversor()
repository = DBRepository()
cursor, labels = repository.ObtenerNumeroTuristasAenaDadoPaisDestinoAnioMinMax(PaisOrigen, AnioInicio, AnioFin)
arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
##Mostrar JSON Extendido
matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
retval = conversor.ObtenerDataJSONExtendido(matriz)
return retval
##Mostrar JSON Reducido
# retval = conversor.convertirAJson(arrayTuplas)
# return retval
| 33.938053
| 137
| 0.744198
| 850
| 7,670
| 6.672941
| 0.104706
| 0.029619
| 0.041961
| 0.046897
| 0.849788
| 0.836742
| 0.836213
| 0.836213
| 0.836213
| 0.836213
| 0
| 0
| 0.195698
| 7,670
| 225
| 138
| 34.088889
| 0.919436
| 0.47927
| 0
| 0.701754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122807
| false
| 0
| 0.035088
| 0
| 0.280702
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f02ea9f41d3b3e416fe32ca09f4f5eaae585091a
| 167
|
py
|
Python
|
swag_auth/admin.py
|
LikaloLLC/django-swag-auth
|
06fd027beca240ff50567a3be4bedee2a7e40a97
|
[
"BSD-3-Clause"
] | null | null | null |
swag_auth/admin.py
|
LikaloLLC/django-swag-auth
|
06fd027beca240ff50567a3be4bedee2a7e40a97
|
[
"BSD-3-Clause"
] | 6
|
2021-05-10T13:11:24.000Z
|
2021-09-08T13:35:46.000Z
|
swag_auth/admin.py
|
LikaloLLC/django-swag-auth
|
06fd027beca240ff50567a3be4bedee2a7e40a97
|
[
"BSD-3-Clause"
] | 2
|
2021-04-29T20:08:21.000Z
|
2021-11-17T19:21:42.000Z
|
from django.contrib import admin
from swag_auth.models import SwaggerStorage, ConnectorToken
admin.site.register(ConnectorToken)
admin.site.register(SwaggerStorage)
| 23.857143
| 59
| 0.856287
| 20
| 167
| 7.1
| 0.6
| 0.267606
| 0.323944
| 0.43662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077844
| 167
| 6
| 60
| 27.833333
| 0.922078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b2b9af50cfc752550a113bd9a62c8317f3e18bb1
| 9,878
|
py
|
Python
|
test/test_training_handler.py
|
tobynance/simple_mud
|
c9be32327fcab0c9bd37fabedb7dd566709b7d48
|
[
"MIT"
] | 6
|
2015-04-24T13:09:37.000Z
|
2022-01-27T01:12:47.000Z
|
test/test_training_handler.py
|
tobynance/simple_mud
|
c9be32327fcab0c9bd37fabedb7dd566709b7d48
|
[
"MIT"
] | 15
|
2015-03-09T00:07:55.000Z
|
2015-03-10T02:30:23.000Z
|
test/test_training_handler.py
|
tobynance/simple_mud
|
c9be32327fcab0c9bd37fabedb7dd566709b7d48
|
[
"MIT"
] | 2
|
2015-04-24T13:09:38.000Z
|
2020-12-22T08:40:07.000Z
|
import unittest
import os
os.environ["SIMPLE_MUD_LOAD_PLAYERS"] = "false"
from logon_handler import LogonHandler
from player import PlayerDatabase, Player
import training_handler
from training_handler import TrainingHandler
from test_utils import MockProtocol, stats_message
########################################################################
class TrainingHandlerTest(unittest.TestCase):
####################################################################
def setUp(self):
MockProtocol.set_handler_class(handler_class=LogonHandler)
self.protocol = MockProtocol()
training_handler.player_database = PlayerDatabase()
self.player = Player(28)
self.player.name = "jerry"
training_handler.player_database.add_player(self.player)
self.protocol.remove_handler()
self.player.protocol = self.protocol
self.protocol.add_handler(TrainingHandler(self.protocol, self.player))
self.handler = self.protocol.handler
self.assertEqual(len(list(training_handler.player_database.all())), 1)
self.protocol.send_data = []
self.maxDiff = None
####################################################################
def test_handle__quit(self):
self.assertEqual(len(self.player.protocol.handlers), 1)
self.assertEqual(self.player.protocol.handlers, [self.handler])
self.handler.handle("quit")
self.assertEqual(len(self.player.protocol.handlers), 0)
####################################################################
def test_handle(self):
self.assertEqual(self.player.stat_points, 18)
self.assertEqual(self.player.attributes.HEALTH, 1)
self.assertEqual(self.player.attributes.BASE_HEALTH, 1)
self.assertEqual(self.player.attributes.MODIFIER_HEALTH, 0)
self.assertEqual(self.player.attributes.STRENGTH, 1)
self.assertEqual(self.player.attributes.BASE_STRENGTH, 1)
self.assertEqual(self.player.attributes.MODIFIER_STRENGTH, 0)
self.assertEqual(self.player.attributes.AGILITY, 1)
self.assertEqual(self.player.attributes.BASE_AGILITY, 1)
self.assertEqual(self.player.attributes.MODIFIER_AGILITY, 0)
self.handler.handle("1")
self.assertEqual(self.player.stat_points, 17)
self.assertEqual(self.player.attributes.HEALTH, 1)
self.assertEqual(self.player.attributes.BASE_HEALTH, 1)
self.assertEqual(self.player.attributes.MODIFIER_HEALTH, 0)
self.assertEqual(self.player.attributes.STRENGTH, 2)
self.assertEqual(self.player.attributes.BASE_STRENGTH, 2)
self.assertEqual(self.player.attributes.MODIFIER_STRENGTH, 0)
self.assertEqual(self.player.attributes.AGILITY, 1)
self.assertEqual(self.player.attributes.BASE_AGILITY, 1)
self.assertEqual(self.player.attributes.MODIFIER_AGILITY, 0)
self.handler.handle("1")
self.assertEqual(self.player.stat_points, 16)
self.assertEqual(self.player.attributes.HEALTH, 1)
self.assertEqual(self.player.attributes.BASE_HEALTH, 1)
self.assertEqual(self.player.attributes.MODIFIER_HEALTH, 0)
self.assertEqual(self.player.attributes.STRENGTH, 3)
self.assertEqual(self.player.attributes.BASE_STRENGTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_STRENGTH, 0)
self.assertEqual(self.player.attributes.AGILITY, 1)
self.assertEqual(self.player.attributes.BASE_AGILITY, 1)
self.assertEqual(self.player.attributes.MODIFIER_AGILITY, 0)
self.handler.handle("2")
self.assertEqual(self.player.stat_points, 15)
self.assertEqual(self.player.attributes.HEALTH, 2)
self.assertEqual(self.player.attributes.BASE_HEALTH, 2)
self.assertEqual(self.player.attributes.MODIFIER_HEALTH, 0)
self.assertEqual(self.player.attributes.STRENGTH, 3)
self.assertEqual(self.player.attributes.BASE_STRENGTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_STRENGTH, 0)
self.assertEqual(self.player.attributes.AGILITY, 1)
self.assertEqual(self.player.attributes.BASE_AGILITY, 1)
self.assertEqual(self.player.attributes.MODIFIER_AGILITY, 0)
self.handler.handle("2")
self.assertEqual(self.player.stat_points, 14)
self.assertEqual(self.player.attributes.HEALTH, 3)
self.assertEqual(self.player.attributes.BASE_HEALTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_HEALTH, 0)
self.assertEqual(self.player.attributes.STRENGTH, 3)
self.assertEqual(self.player.attributes.BASE_STRENGTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_STRENGTH, 0)
self.assertEqual(self.player.attributes.AGILITY, 1)
self.assertEqual(self.player.attributes.BASE_AGILITY, 1)
self.assertEqual(self.player.attributes.MODIFIER_AGILITY, 0)
self.handler.handle("3")
self.assertEqual(self.player.stat_points, 13)
self.assertEqual(self.player.attributes.HEALTH, 3)
self.assertEqual(self.player.attributes.BASE_HEALTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_HEALTH, 0)
self.assertEqual(self.player.attributes.STRENGTH, 3)
self.assertEqual(self.player.attributes.BASE_STRENGTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_STRENGTH, 0)
self.assertEqual(self.player.attributes.AGILITY, 2)
self.assertEqual(self.player.attributes.BASE_AGILITY, 2)
self.assertEqual(self.player.attributes.MODIFIER_AGILITY, 0)
self.handler.handle("3")
self.assertEqual(self.player.stat_points, 12)
self.assertEqual(self.player.attributes.HEALTH, 3)
self.assertEqual(self.player.attributes.BASE_HEALTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_HEALTH, 0)
self.assertEqual(self.player.attributes.STRENGTH, 3)
self.assertEqual(self.player.attributes.BASE_STRENGTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_STRENGTH, 0)
self.assertEqual(self.player.attributes.AGILITY, 3)
self.assertEqual(self.player.attributes.BASE_AGILITY, 3)
self.assertEqual(self.player.attributes.MODIFIER_AGILITY, 0)
self.player.stat_points = 1
self.handler.handle("3")
self.assertEqual(self.player.stat_points, 0)
self.assertEqual(self.player.attributes.HEALTH, 3)
self.assertEqual(self.player.attributes.BASE_HEALTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_HEALTH, 0)
self.assertEqual(self.player.attributes.STRENGTH, 3)
self.assertEqual(self.player.attributes.BASE_STRENGTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_STRENGTH, 0)
self.assertEqual(self.player.attributes.AGILITY, 4)
self.assertEqual(self.player.attributes.BASE_AGILITY, 4)
self.assertEqual(self.player.attributes.MODIFIER_AGILITY, 0)
self.handler.handle("3")
self.assertEqual(self.player.stat_points, 0)
self.assertEqual(self.player.attributes.HEALTH, 3)
self.assertEqual(self.player.attributes.BASE_HEALTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_HEALTH, 0)
self.assertEqual(self.player.attributes.STRENGTH, 3)
self.assertEqual(self.player.attributes.BASE_STRENGTH, 3)
self.assertEqual(self.player.attributes.MODIFIER_STRENGTH, 0)
self.assertEqual(self.player.attributes.AGILITY, 4)
self.assertEqual(self.player.attributes.BASE_AGILITY, 4)
self.assertEqual(self.player.attributes.MODIFIER_AGILITY, 0)
####################################################################
def test_handle__unknown_command(self):
self.handler.handle("beep")
self.assertEqual(self.player.stat_points, 18)
self.assertEqual(self.player.attributes.HEALTH, 1)
self.assertEqual(self.player.attributes.BASE_HEALTH, 1)
self.assertEqual(self.player.attributes.MODIFIER_HEALTH, 0)
self.assertEqual(self.player.attributes.STRENGTH, 1)
self.assertEqual(self.player.attributes.BASE_STRENGTH, 1)
self.assertEqual(self.player.attributes.MODIFIER_STRENGTH, 0)
self.assertEqual(self.player.attributes.AGILITY, 1)
self.assertEqual(self.player.attributes.BASE_AGILITY, 1)
self.assertEqual(self.player.attributes.MODIFIER_AGILITY, 0)
self.assertEqual(self.protocol.send_data, ["<reset><clearscreen><red>Unknown Command 'beep'<newline>", stats_message])
####################################################################
def test_enter(self):
self.player.logged_in = True
self.player.active = True
self.player.newbie = False
self.handler.enter()
self.assertEqual(self.player.active, False)
self.assertEqual(self.player.newbie, False)
self.assertEqual(self.protocol.send_data, [stats_message])
####################################################################
def test_hung_up(self):
self.player.logged_in = True
self.handler.hung_up()
self.assertEqual(self.protocol.send_data, [])
self.assertEqual(self.player.logged_in, False)
####################################################################
def test_flooded(self):
self.player.logged_in = True
self.handler.hung_up()
self.assertEqual(self.protocol.send_data, [])
self.assertEqual(self.player.logged_in, False)
####################################################################
def test_print_stats(self):
self.handler.print_stats()
self.assertEqual(self.protocol.send_data, ["<clearscreen>" + stats_message])
self.handler.print_stats(clear_screen=False)
self.assertEqual(self.protocol.send_data, ["<clearscreen>" + stats_message, stats_message])
| 50.917526
| 126
| 0.671492
| 1,102
| 9,878
| 5.903811
| 0.07804
| 0.181371
| 0.324162
| 0.403474
| 0.827698
| 0.81371
| 0.803566
| 0.748079
| 0.748079
| 0.721795
| 0
| 0.014759
| 0.163191
| 9,878
| 193
| 127
| 51.181347
| 0.77232
| 0
| 0
| 0.628049
| 0
| 0
| 0.014144
| 0.005938
| 0
| 0
| 0
| 0
| 0.695122
| 1
| 0.04878
| false
| 0
| 0.042683
| 0
| 0.097561
| 0.018293
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b2d3dd928c9def01fc74809b0ab723fd17790a4c
| 435
|
py
|
Python
|
test/utils_test/test_color_util.py
|
rhsu/slackjack
|
c6ba6ec97fcf669c8f4dddc83a3b03cd829ec792
|
[
"MIT"
] | null | null | null |
test/utils_test/test_color_util.py
|
rhsu/slackjack
|
c6ba6ec97fcf669c8f4dddc83a3b03cd829ec792
|
[
"MIT"
] | 8
|
2019-03-25T23:11:54.000Z
|
2019-04-09T23:38:23.000Z
|
test/utils_test/test_color_util.py
|
rhsu/slackjack
|
c6ba6ec97fcf669c8f4dddc83a3b03cd829ec792
|
[
"MIT"
] | 1
|
2019-04-04T00:12:35.000Z
|
2019-04-04T00:12:35.000Z
|
from utils.color_util import determine_color
def test_1_10_even():
assert determine_color(10) == "black"
def test_1_10_odd():
assert determine_color(1) == "red"
def test_19_28_even():
assert determine_color(28) == "black"
def test_19_28_odd():
assert determine_color(19) == "red"
def test_11_18_even():
assert determine_color(18) == "red"
def test_11_18_odd():
assert determine_color(11) == "black"
| 16.730769
| 44
| 0.698851
| 67
| 435
| 4.149254
| 0.283582
| 0.352518
| 0.431655
| 0.258993
| 0.100719
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091922
| 0.174713
| 435
| 25
| 45
| 17.4
| 0.682451
| 0
| 0
| 0
| 0
| 0
| 0.055172
| 0
| 0
| 0
| 0
| 0
| 0.461538
| 1
| 0.461538
| true
| 0
| 0.076923
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
b2dcfae0c7c6f659d1748c94e12169e1e0d0826a
| 63,386
|
py
|
Python
|
rhced/obsolete/model.py
|
ecosang/rhced
|
b51fc7483a5b5b9071ea004bf4e5e29ebfcf1395
|
[
"MIT"
] | 1
|
2022-02-18T09:13:37.000Z
|
2022-02-18T09:13:37.000Z
|
rhced/obsolete/model.py
|
ecosang/rhced
|
b51fc7483a5b5b9071ea004bf4e5e29ebfcf1395
|
[
"MIT"
] | null | null | null |
rhced/obsolete/model.py
|
ecosang/rhced
|
b51fc7483a5b5b9071ea004bf4e5e29ebfcf1395
|
[
"MIT"
] | null | null | null |
__all__=['trainig_loop','ResNIHCM']
import math
import pandas as pd
import os
import pickle
import datetime
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.optim as optim
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, TraceMeanField_ELBO
import torch
import torch.optim as optim
import torch.nn as nn
from torch.optim import Adam
from torch.distributions import constraints
from torch.utils.data import Dataset, DataLoader
import pyro
from pyro.optim import MultiStepLR, ExponentialLR
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO,TraceMeanField_ELBO
def trainig_loop(n_epochs, optimizer, model, loss_fn, train_loader,cuda=False,priors=None,prior_network=False,new_data=False,missing_data=False):
if cuda:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device=="cpu":
print("Cuda is not available. CPU is used.")
else:
device="cpu"
model=model.to(device)
svi = SVI(model.model, model.guide, optimizer, loss=loss_fn)
loss_list=[]
for epoch in range(1, n_epochs + 1):
loss_train = 0.0
for ix, (y_net, t_out,i_heat,i_heat_on,i_heat_off,i_cool,i_cool_on,i_cool_off,i_aux,i_aux_on,i_aux_off,i_heat_df) in enumerate(train_loader):
y_net = y_net.to(device=device) # <1>
t_out = t_out.to(device=device) # <1>
i_heat = i_heat.to(device=device) # <1>
i_heat_on = i_heat_on.to(device=device) # <1>
i_heat_off = i_heat_off.to(device=device) # <1>
i_cool = i_cool.to(device=device) # <1>
i_cool_on = i_cool_on.to(device=device) # <1>
i_cool_off = i_cool_off.to(device=device) # <1>
i_aux = i_aux.to(device=device) # <1>
i_aux_on = i_aux_on.to(device=device) # <1>
i_aux_off = i_aux_off.to(device=device) # <1>
i_heat_df =i_heat_df.to(device=device)
if priors is None:
loss=svi.step(y_net=y_net, t_out=t_out,i_heat=i_heat,i_heat_on=i_heat_on,i_heat_off=i_heat_off,i_cool=i_cool,i_cool_on=i_cool_on,i_cool_off=i_cool_off,i_aux=i_aux,i_aux_on=i_aux_on,i_aux_off=i_aux_off,i_heat_df=i_heat_df,priors=None)
else:
# no model update. Just in-prior computation (actually, it doesn't exist)
raise ValueError("Training is not in any case. check priors, new_data, prior_network params")
loss_train += loss
loss_list.append(loss_train / len(train_loader))
if epoch == 1 or epoch % 5 == 0:
print('{} Epoch {}, Training loss {}'.format(
datetime.datetime.now(), epoch,
loss_train / len(train_loader)))
if (epoch==1 or epoch%10==0) and (type(optimizer)==pyro.optim.lr_scheduler.PyroLRScheduler) :
optimizer.step()
#print(f'learning rate {next(iter(svi.optim.optim_objs.values())).get_last_lr()[0]}')
return loss_list
class ResNIHCM(nn.Module):
def __init__(self):
super().__init__()
# define dimensions
# Use ELU see Murphy p.397
self.elu=nn.ELU()
self.relu=nn.ReLU()
self.softmax=nn.Softmax(dim=1)
self.tanh=nn.Tanh()
self.softplus=nn.Softplus()
def calculate_concentration(self,mu,sigma):
concentration_alpha=((1-mu)/(sigma**2)-1/mu)*(mu**2)
concentration_beta=concentration_alpha*(1/mu-1)
return concentration_alpha, concentration_beta
def model(self, y_net, t_out,
i_heat,i_heat_on,i_heat_off,
i_cool,i_cool_on,i_cool_off,
i_aux,i_aux_on,i_aux_off,i_heat_df,
priors=None):
# it is hard to generalize the process.
# we may have matrix,
# initial network
self.batch_sz=t_out.shape[0]
device=t_out.device
if priors is None:
add_noise=0 # no noise addition for priors
noise_scale=0.01
noise_mean=0
priors={
"mu_misc":np.array([-3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_misc~logN(-3,2.5) [0.0004,0.05,6.783]
"sigma_misc":np.array([1.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_beta0_heat":np.array([-3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
"sigma_beta0_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
"mu_beta1_heat":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
"sigma_beta1_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_heat~logN(-1.2,0.6) [0.093,0.30,1.0]
"mu_heat_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_on~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
"sigma_heat_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_heat_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_off~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
"sigma_heat_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_beta0_cool":np.array([-2.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
"sigma_beta0_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
"mu_beta1_cool":np.array([-4.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
"sigma_beta1_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_cool~logN(-1.2,0.6) [0.093,0.30,1.0]
"mu_cool_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_cool_on~Beta(mu_cool_on=0.5,sigma_cool_on=1/12) # 0~1 flat
"sigma_cool_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_cool_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_cool_off~Beta(mu_cool_on=0.5,sigma_cool_on=1/12) # 0~1 flat
"sigma_cool_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_aux":np.array([-3.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_aux~logN(-0.4,0.4) [0.3,0.67,1.43]
"sigma_aux":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_heat_df":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_aux~logN(-0.4,0.4) [0.3,0.67,1.43]
"sigma_heat_df":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_aux_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_aux_on~Beta(mu_aux_on=0.5,sigma_aux_on=1/12) # 0~1 flat
"sigma_aux_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_aux_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_aux_off~Beta(mu_aux_on=0.5,sigma_aux_on=1/12) # 0~1 flat
"sigma_aux_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_phi_df":np.array([-1/3])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # phi~N(-1/3,1/6) give [-2/3,-1/3,0] which is [-10,0,10] in real scale
# "sigma_phi_df":np.array([1/6])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_psi":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # psi~Beta(mu_psi=0.5,sigma_psi=1/12) # 0~1 flat
# "sigma_psi":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"mu_sigma_net":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_sigma_net":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise #
}
mu_misc=torch.tensor(priors['mu_misc'],dtype=torch.float32).to(device)
sigma_misc=torch.tensor(priors['sigma_misc'],dtype=torch.float32).to(device)
mu_beta0_heat=torch.tensor(priors['mu_beta0_heat'],dtype=torch.float32).to(device)
sigma_beta0_heat=torch.tensor(priors['sigma_beta0_heat'],dtype=torch.float32).to(device)
mu_beta1_heat=torch.tensor(priors['mu_beta1_heat'],dtype=torch.float32).to(device)
sigma_beta1_heat=torch.tensor(priors['sigma_beta1_heat'],dtype=torch.float32).to(device)
sigma_heat=torch.tensor(priors['sigma_heat'],dtype=torch.float32).to(device)
mu_heat_on=torch.tensor(priors['mu_heat_on'],dtype=torch.float32).to(device)
sigma_heat_on=torch.tensor(priors['sigma_heat_on'],dtype=torch.float32).to(device)
mu_heat_off=torch.tensor(priors['mu_heat_off'],dtype=torch.float32).to(device)
sigma_heat_off=torch.tensor(priors['sigma_heat_off'],dtype=torch.float32).to(device)
mu_beta0_cool=torch.tensor(priors['mu_beta0_cool'],dtype=torch.float32).to(device)
sigma_beta0_cool=torch.tensor(priors['sigma_beta0_cool'],dtype=torch.float32).to(device)
mu_beta1_cool=torch.tensor(priors['mu_beta1_cool'],dtype=torch.float32).to(device)
sigma_beta1_cool=torch.tensor(priors['sigma_beta1_cool'],dtype=torch.float32).to(device)
sigma_cool=torch.tensor(priors['sigma_cool'],dtype=torch.float32).to(device)
mu_cool_on=torch.tensor(priors['mu_cool_on'],dtype=torch.float32).to(device)
sigma_cool_on=torch.tensor(priors['sigma_cool_on'],dtype=torch.float32).to(device)
mu_cool_off=torch.tensor(priors['mu_cool_off'],dtype=torch.float32).to(device)
sigma_cool_off=torch.tensor(priors['sigma_cool_off'],dtype=torch.float32).to(device)
mu_aux=torch.tensor(priors['mu_aux'],dtype=torch.float32).to(device)
sigma_aux=torch.tensor(priors['sigma_aux'],dtype=torch.float32).to(device)
mu_aux_on=torch.tensor(priors['mu_aux_on'],dtype=torch.float32).to(device)
sigma_aux_on=torch.tensor(priors['sigma_aux_on'],dtype=torch.float32).to(device)
mu_aux_off=torch.tensor(priors['mu_aux_off'],dtype=torch.float32).to(device)
sigma_aux_off=torch.tensor(priors['sigma_aux_off'],dtype=torch.float32).to(device)
mu_heat_df=torch.tensor(priors['mu_heat_df'],dtype=torch.float32).to(device)
sigma_heat_df=torch.tensor(priors['sigma_heat_df'],dtype=torch.float32).to(device)
#mu_phi_df=torch.tensor(priors['mu_phi_df'],dtype=torch.float32).to(device)
#sigma_phi_df=torch.tensor(priors['sigma_phi_df'],dtype=torch.float32).to(device)
#mu_psi=torch.tensor(priors['mu_psi'],dtype=torch.float32).to(device)
#sigma_psi=torch.tensor(priors['sigma_psi'],dtype=torch.float32).to(device)
mu_sigma_net=torch.tensor(priors['mu_sigma_net'],dtype=torch.float32).to(device)
sigma_sigma_net=torch.tensor(priors['sigma_sigma_net'],dtype=torch.float32).to(device)
E_misc=self.softplus(pyro.sample("E_misc",dist.Normal(mu_misc,sigma_misc).to_event(1)))
# here mu_heat is not real scale. E_heat~LogNormal(mu_heat,sigma_heat)
beta0_heat=pyro.sample("beta0_heat",dist.Normal(mu_beta0_heat,sigma_beta0_heat).to_event(1))
beta1_heat=self.softplus(pyro.sample("beta1_heat",dist.Normal(mu_beta1_heat,sigma_beta1_heat).to_event(1)))
mu_heat=pyro.deterministic("mu_heat",beta0_heat+beta1_heat*t_out)
E_heat=self.softplus(pyro.sample("E_heat",dist.Normal(mu_heat,sigma_heat).to_event(1)))
#print(f"E_heat shape is {E_heat.shape}")
beta0_cool=pyro.sample("beta0_cool",dist.Normal(mu_beta0_cool,sigma_beta0_cool).to_event(1))
beta1_cool=self.softplus(pyro.sample("beta1_cool",dist.Normal(mu_beta1_cool,sigma_beta1_cool).to_event(1)))
mu_cool=pyro.deterministic("mu_cool",beta0_cool+beta1_cool*t_out)
E_cool=self.softplus(pyro.sample("E_cool",dist.Normal(mu_cool,sigma_cool).to_event(1)))
E_aux=self.softplus(pyro.sample("E_aux",dist.Normal(mu_aux,sigma_aux).to_event(1)))
# phi_df=pyro.sample("phi_df",dist.Normal(mu_phi_df,sigma_phi_df).to_event(1))
# mu_psi_alpha,mu_psi_beta=self.calculate_concentration(mu=mu_psi,sigma=sigma_psi)
# psi=pyro.sample("psi",dist.Beta(concentration1=mu_psi_alpha ,concentration0=mu_psi_beta).to_event(1))
eta_heat=i_heat.clone()
mu_heat_on_alpha,mu_heat_on_beta=self.calculate_concentration(mu=mu_heat_on,sigma=sigma_heat_on)
mu_heat_off_alpha,mu_heat_off_beta=self.calculate_concentration(mu=mu_heat_off,sigma=sigma_heat_off)
# print(f'mu_heat_on_alpha is {mu_heat_on_alpha}')
# print(f'mu_heat_on_beta is {mu_heat_on_beta}')
eta_heat_on=pyro.sample("eta_heat_on",dist.Beta(concentration1=mu_heat_on_alpha ,concentration0=mu_heat_on_beta).to_event(1))
eta_heat_off=pyro.sample("eta_heat_off",dist.Beta(concentration1=mu_heat_off_alpha ,concentration0=mu_heat_off_beta).to_event(1))
eta_heat[i_heat_on==1]=eta_heat_on#[i_heat_on==1]
eta_heat[i_heat_off==1]=eta_heat_off#[i_heat_off==1]
eta_cool=i_cool.clone()
mu_cool_on_alpha,mu_cool_on_beta=self.calculate_concentration(mu=mu_cool_on,sigma=sigma_cool_on)
mu_cool_off_alpha,mu_cool_off_beta=self.calculate_concentration(mu=mu_cool_off,sigma=sigma_cool_off)
eta_cool_on=pyro.sample("eta_cool_on",dist.Beta(concentration1=mu_cool_on_alpha ,concentration0=mu_cool_on_beta).to_event(1))
eta_cool_off=pyro.sample("eta_cool_off",dist.Beta(concentration1=mu_cool_off_alpha ,concentration0=mu_cool_off_beta).to_event(1))
eta_cool[i_cool_on==1]=eta_cool_on#[i_cool_on==1]
eta_cool[i_cool_off==1]=eta_cool_off#[i_cool_off==1]
eta_aux=i_aux.clone()
mu_aux_on_alpha,mu_aux_on_beta=self.calculate_concentration(mu=mu_aux_on,sigma=sigma_aux_on)
mu_aux_off_alpha,mu_aux_off_beta=self.calculate_concentration(mu=mu_aux_off,sigma=sigma_aux_off)
eta_aux_on=pyro.sample("eta_aux_on",dist.Beta(concentration1=mu_aux_on_alpha ,concentration0=mu_aux_on_beta).to_event(1))
eta_aux_off=pyro.sample("eta_aux_off",dist.Beta(concentration1=mu_aux_off_alpha ,concentration0=mu_aux_off_beta).to_event(1))
eta_aux[i_aux_on==1]=eta_aux_on#[i_aux_on==1]
eta_aux[i_aux_off==1]=eta_aux_off#[i_aux_off==1]
E_heat_df=self.softplus(pyro.sample("E_heat_df",dist.Normal(mu_heat_df,sigma_heat_df).to_event(1)))
#i_df=torch.zeros_like(i_heat).to(device)
# https://pytorch.org/docs/stable/distributions.html#torch.distributions.beta.Beta.concentration1
# concentration1 (float or Tensor) – 1st concentration parameter of the distribution (often referred to as alpha)
# concentration0 (float or Tensor) – 2nd concentration parameter of the distribution (often referred to as beta)
#with pyro.plate("Emisc", size=t_out.shape[0]):
#i_df_on=pyro.sample("i_df_on",dist.Binomial(total_count=1,probs=psi))
#i_df=torch.where((i_heat==torch.tensor(1,dtype=torch.float32))&(t_out<phi_df),i_df_on,i_df)
y_nan=torch.any(torch.cat([torch.isnan(i_heat)[:,None],
torch.isnan(i_cool)[:,None],
torch.isnan(i_aux)[:,None],
torch.isnan(t_out)[:,None],
torch.isnan(i_heat_df)[:,None],
torch.isnan(y_net)[:,None]
],dim=1),axis=1)
#print(f'y_nan is {y_nan}')
# print(f'eta_heat is {eta_heat}')
# print(f'i_heat is {i_heat}')
mu_net_=eta_heat*i_heat*E_heat+eta_cool*i_cool*E_cool+(eta_aux*i_aux)*E_aux+(i_heat_df)*E_heat_df+E_misc
mu_net=pyro.deterministic("mu_net",mu_net_[~y_nan])
sigma_net = self.softplus(pyro.sample("sigma_t_unit", dist.Normal(mu_sigma_net,sigma_sigma_net).to_event(1)))
#print(f"sigma_net is {sigma_net}")
y_net_=y_net.flatten()[~y_nan]
with pyro.plate("data", size=mu_net.shape[0]):
obs_net=pyro.sample("obs_net", dist.Normal(mu_net, sigma_net).to_event(1), obs=y_net_.flatten()) # .to_event(1)
return mu_net,priors
def guide(self, y_net, t_out,
i_heat,i_heat_on,i_heat_off,
i_cool,i_cool_on,i_cool_off,
i_aux,i_aux_on,i_aux_off,i_heat_df,
priors=None):
self.batch_sz=t_out.shape[0]
device=t_out.device
if priors is None:
# add noise for priors
add_noise=1
noise_scale=0.001
noise_mean=0
priors={
"mu_misc":np.array([-3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_misc~logN(-3,2.5) [0.0004,0.05,6.783]
"sigma_misc":np.array([1.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_beta0_heat":np.array([-3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
"sigma_beta0_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
"mu_beta1_heat":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
"sigma_beta1_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_heat~logN(-1.2,0.6) [0.093,0.30,1.0]
"mu_heat_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_on~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
"sigma_heat_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_heat_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_off~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
"sigma_heat_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_beta0_cool":np.array([-2.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
"sigma_beta0_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
"mu_beta1_cool":np.array([-4.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
"sigma_beta1_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_cool~logN(-1.2,0.6) [0.093,0.30,1.0]
"mu_cool_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_cool_on~Beta(mu_cool_on=0.5,sigma_cool_on=1/12) # 0~1 flat
"sigma_cool_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_cool_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_cool_off~Beta(mu_cool_on=0.5,sigma_cool_on=1/12) # 0~1 flat
"sigma_cool_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_aux":np.array([-3.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_aux~logN(-0.4,0.4) [0.3,0.67,1.43]
"sigma_aux":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_heat_df":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_aux~logN(-0.4,0.4) [0.3,0.67,1.43]
"sigma_heat_df":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_aux_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_aux_on~Beta(mu_aux_on=0.5,sigma_aux_on=1/12) # 0~1 flat
"sigma_aux_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_aux_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_aux_off~Beta(mu_aux_on=0.5,sigma_aux_on=1/12) # 0~1 flat
"sigma_aux_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_phi_df":np.array([-1/3])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # phi~N(-1/3,1/6) give [-2/3,-1/3,0] which is [-10,0,10] in real scale
# "sigma_phi_df":np.array([1/6])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_psi":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # psi~Beta(mu_psi=0.5,sigma_psi=1/12) # 0~1 flat
# "sigma_psi":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"mu_sigma_net":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_sigma_net":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise #
}
################### params ########################33
mu_misc=torch.tensor(priors['mu_misc'],dtype=torch.float32).to(device)
sigma_misc=torch.tensor(priors['sigma_misc'],dtype=torch.float32).to(device)
mu_beta0_heat=torch.tensor(priors['mu_beta0_heat'],dtype=torch.float32).to(device)
sigma_beta0_heat=torch.tensor(priors['sigma_beta0_heat'],dtype=torch.float32).to(device)
mu_beta1_heat=torch.tensor(priors['mu_beta1_heat'],dtype=torch.float32).to(device)
sigma_beta1_heat=torch.tensor(priors['sigma_beta1_heat'],dtype=torch.float32).to(device)
sigma_heat=torch.tensor(priors['sigma_heat'],dtype=torch.float32).to(device)
mu_heat_on=torch.tensor(priors['mu_heat_on'],dtype=torch.float32).to(device)
sigma_heat_on=torch.tensor(priors['sigma_heat_on'],dtype=torch.float32).to(device)
mu_heat_off=torch.tensor(priors['mu_heat_off'],dtype=torch.float32).to(device)
sigma_heat_off=torch.tensor(priors['sigma_heat_off'],dtype=torch.float32).to(device)
mu_beta0_cool=torch.tensor(priors['mu_beta0_cool'],dtype=torch.float32).to(device)
sigma_beta0_cool=torch.tensor(priors['sigma_beta0_cool'],dtype=torch.float32).to(device)
mu_beta1_cool=torch.tensor(priors['mu_beta1_cool'],dtype=torch.float32).to(device)
sigma_beta1_cool=torch.tensor(priors['sigma_beta1_cool'],dtype=torch.float32).to(device)
sigma_cool=torch.tensor(priors['sigma_cool'],dtype=torch.float32).to(device)
mu_cool_on=torch.tensor(priors['mu_cool_on'],dtype=torch.float32).to(device)
sigma_cool_on=torch.tensor(priors['sigma_cool_on'],dtype=torch.float32).to(device)
mu_cool_off=torch.tensor(priors['mu_cool_off'],dtype=torch.float32).to(device)
sigma_cool_off=torch.tensor(priors['sigma_cool_off'],dtype=torch.float32).to(device)
mu_aux=torch.tensor(priors['mu_aux'],dtype=torch.float32).to(device)
sigma_aux=torch.tensor(priors['sigma_aux'],dtype=torch.float32).to(device)
mu_aux_on=torch.tensor(priors['mu_aux_on'],dtype=torch.float32).to(device)
sigma_aux_on=torch.tensor(priors['sigma_aux_on'],dtype=torch.float32).to(device)
mu_aux_off=torch.tensor(priors['mu_aux_off'],dtype=torch.float32).to(device)
sigma_aux_off=torch.tensor(priors['sigma_aux_off'],dtype=torch.float32).to(device)
mu_heat_df=torch.tensor(priors['mu_heat_df'],dtype=torch.float32).to(device)
sigma_heat_df=torch.tensor(priors['sigma_heat_df'],dtype=torch.float32).to(device)
#mu_phi_df=torch.tensor(priors['mu_phi_df'],dtype=torch.float32).to(device)
#sigma_phi_df=torch.tensor(priors['sigma_phi_df'],dtype=torch.float32).to(device)
#mu_psi=torch.tensor(priors['mu_psi'],dtype=torch.float32).to(device)
#sigma_psi=torch.tensor(priors['sigma_psi'],dtype=torch.float32).to(device)
mu_sigma_net=torch.tensor(priors['mu_sigma_net'],dtype=torch.float32).to(device)
sigma_sigma_net=torch.tensor(priors['sigma_sigma_net'],dtype=torch.float32).to(device)
E_misc=self.softplus(pyro.sample("E_misc",dist.Normal(mu_misc,sigma_misc).to_event(1)))
# here mu_heat is not real scale. E_heat~LogNormal(mu_heat,sigma_heat)
beta0_heat=pyro.sample("beta0_heat",dist.Normal(mu_beta0_heat,sigma_beta0_heat).to_event(1))
beta1_heat=self.softplus(pyro.sample("beta1_heat",dist.Normal(mu_beta1_heat,sigma_beta1_heat).to_event(1)))
mu_heat=pyro.deterministic("mu_heat",beta0_heat+beta1_heat*t_out)
E_heat=self.softplus(pyro.sample("E_heat",dist.Normal(mu_heat,sigma_heat).to_event(1)))
#print(f"E_heat shape is {E_heat.shape}")
beta0_cool=pyro.sample("beta0_cool",dist.Normal(mu_beta0_cool,sigma_beta0_cool).to_event(1))
beta1_cool=self.softplus(pyro.sample("beta1_cool",dist.Normal(mu_beta1_cool,sigma_beta1_cool).to_event(1)))
mu_cool=pyro.deterministic("mu_cool",beta0_cool+beta1_cool*t_out)
E_cool=self.softplus(pyro.sample("E_cool",dist.Normal(mu_cool,sigma_cool).to_event(1)))
E_aux=self.softplus(pyro.sample("E_aux",dist.Normal(mu_aux,sigma_aux).to_event(1)))
# phi_df=pyro.sample("phi_df",dist.Normal(mu_phi_df,sigma_phi_df).to_event(1))
# mu_psi_alpha,mu_psi_beta=self.calculate_concentration(mu=mu_psi,sigma=sigma_psi)
# psi=pyro.sample("psi",dist.Beta(concentration1=mu_psi_alpha ,concentration0=mu_psi_beta).to_event(1))
eta_heat=i_heat.clone()
mu_heat_on_alpha,mu_heat_on_beta=self.calculate_concentration(mu=mu_heat_on,sigma=sigma_heat_on)
mu_heat_off_alpha,mu_heat_off_beta=self.calculate_concentration(mu=mu_heat_off,sigma=sigma_heat_off)
# print(f'mu_heat_on_alpha is {mu_heat_on_alpha}')
# print(f'mu_heat_on_beta is {mu_heat_on_beta}')
eta_heat_on=pyro.sample("eta_heat_on",dist.Beta(concentration1=mu_heat_on_alpha ,concentration0=mu_heat_on_beta).to_event(1))
eta_heat_off=pyro.sample("eta_heat_off",dist.Beta(concentration1=mu_heat_off_alpha ,concentration0=mu_heat_off_beta).to_event(1))
eta_heat[i_heat_on==1]=eta_heat_on#[i_heat_on==1]
eta_heat[i_heat_off==1]=eta_heat_off#[i_heat_off==1]
eta_cool=i_cool.clone()
mu_cool_on_alpha,mu_cool_on_beta=self.calculate_concentration(mu=mu_cool_on,sigma=sigma_cool_on)
mu_cool_off_alpha,mu_cool_off_beta=self.calculate_concentration(mu=mu_cool_off,sigma=sigma_cool_off)
eta_cool_on=pyro.sample("eta_cool_on",dist.Beta(concentration1=mu_cool_on_alpha ,concentration0=mu_cool_on_beta).to_event(1))
eta_cool_off=pyro.sample("eta_cool_off",dist.Beta(concentration1=mu_cool_off_alpha ,concentration0=mu_cool_off_beta).to_event(1))
eta_cool[i_cool_on==1]=eta_cool_on#[i_cool_on==1]
eta_cool[i_cool_off==1]=eta_cool_off#[i_cool_off==1]
eta_aux=i_aux.clone()
mu_aux_on_alpha,mu_aux_on_beta=self.calculate_concentration(mu=mu_aux_on,sigma=sigma_aux_on)
mu_aux_off_alpha,mu_aux_off_beta=self.calculate_concentration(mu=mu_aux_off,sigma=sigma_aux_off)
eta_aux_on=pyro.sample("eta_aux_on",dist.Beta(concentration1=mu_aux_on_alpha ,concentration0=mu_aux_on_beta).to_event(1))
eta_aux_off=pyro.sample("eta_aux_off",dist.Beta(concentration1=mu_aux_off_alpha ,concentration0=mu_aux_off_beta).to_event(1))
eta_aux[i_aux_on==1]=eta_aux_on#[i_aux_on==1]
eta_aux[i_aux_off==1]=eta_aux_off#[i_aux_off==1]
E_heat_df=self.softplus(pyro.sample("E_heat_df",dist.Normal(mu_heat_df,sigma_heat_df).to_event(1)))
#i_df=torch.zeros_like(i_heat).to(device)
# https://pytorch.org/docs/stable/distributions.html#torch.distributions.beta.Beta.concentration1
# concentration1 (float or Tensor) – 1st concentration parameter of the distribution (often referred to as alpha)
# concentration0 (float or Tensor) – 2nd concentration parameter of the distribution (often referred to as beta)
#with pyro.plate("Emisc", size=t_out.shape[0]):
#i_df_on=pyro.sample("i_df_on",dist.Binomial(total_count=1,probs=psi))
#i_df=torch.where((i_heat==torch.tensor(1,dtype=torch.float32))&(t_out<phi_df),i_df_on,i_df)
y_nan=torch.any(torch.cat([torch.isnan(i_heat)[:,None],
torch.isnan(i_cool)[:,None],
torch.isnan(i_aux)[:,None],
torch.isnan(t_out)[:,None],
torch.isnan(i_heat_df)[:,None],
torch.isnan(y_net)[:,None]
],dim=1),axis=1)
#print(f'y_nan is {y_nan}')
# print(f'eta_heat is {eta_heat}')
# print(f'i_heat is {i_heat}')
mu_net_=eta_heat*i_heat*E_heat+eta_cool*i_cool*E_cool+(eta_aux*i_aux)*E_aux+(i_heat_df)*E_heat_df+E_misc
mu_net=pyro.deterministic("mu_net",mu_net_[~y_nan])
sigma_net = self.softplus(pyro.sample("sigma_t_unit", dist.Normal(mu_sigma_net,sigma_sigma_net).to_event(1)))
#print(f"sigma_net is {sigma_net}")
y_net_=y_net.flatten()[~y_nan]
# class ResNIHCM(nn.Module):
# def __init__(self):
# super().__init__()
# # define dimensions
# # Use ELU see Murphy p.397
# self.elu=nn.ELU()
# self.relu=nn.ReLU()
# self.softmax=nn.Softmax(dim=1)
# self.tanh=nn.Tanh()
# self.softplus=nn.Softplus()
# def calculate_concentration(self,mu,sigma):
# concentration_alpha=((1-mu)/(sigma**2)-1/mu)*(mu**2)
# concentration_beta=concentration_alpha*(1/mu-1)
# return concentration_alpha, concentration_beta
# def model(self, y_net, t_out,
# i_heat,i_heat_on,i_heat_off,
# i_cool,i_cool_on,i_cool_off,
# i_aux,i_aux_on,i_aux_off,
# priors=None):
# # it is hard to generalize the process.
# # we may have matrix,
# # initial network
# self.batch_sz=t_out.shape[0]
# device=t_out.device
# if priors is None:
# add_noise=0 # no noise addition for priors
# noise_scale=0.01
# noise_mean=0
# priors={
# "mu_misc":np.array([-3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_misc~logN(-3,2.5) [0.0004,0.05,6.783]
# "sigma_misc":np.array([3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_beta0_heat":np.array([-2.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
# "sigma_beta0_heat":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
# "mu_beta1_heat":np.array([-2.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
# "sigma_beta1_heat":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
# "sigma_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_heat~logN(-1.2,0.6) [0.093,0.30,1.0]
# "mu_heat_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_on~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
# "sigma_heat_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_heat_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_off~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
# "sigma_heat_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_beta0_cool":np.array([-2.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
# "sigma_beta0_cool":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
# "mu_beta1_cool":np.array([-2.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
# "sigma_beta1_cool":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
# "sigma_cool":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_cool~logN(-1.2,0.6) [0.093,0.30,1.0]
# "mu_cool_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_cool_on~Beta(mu_cool_on=0.5,sigma_cool_on=1/12) # 0~1 flat
# "sigma_cool_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_cool_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_cool_off~Beta(mu_cool_on=0.5,sigma_cool_on=1/12) # 0~1 flat
# "sigma_cool_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_aux":np.array([-0.4])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_aux~logN(-0.4,0.4) [0.3,0.67,1.43]
# "sigma_aux":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_heat_df":np.array([-3.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_aux~logN(-0.4,0.4) [0.3,0.67,1.43]
# "sigma_heat_df":np.array([1.])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_aux_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_aux_on~Beta(mu_aux_on=0.5,sigma_aux_on=1/12) # 0~1 flat
# "sigma_aux_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_aux_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_aux_off~Beta(mu_aux_on=0.5,sigma_aux_on=1/12) # 0~1 flat
# "sigma_aux_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_phi_df":np.array([-1/3])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # phi~N(-1/3,1/6) give [-2/3,-1/3,0] which is [-10,0,10] in real scale
# "sigma_phi_df":np.array([1/6])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_psi":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # psi~Beta(mu_psi=0.5,sigma_psi=1/12) # 0~1 flat
# "sigma_psi":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
# "mu_sigma_net":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
# "sigma_sigma_net":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise #
# }
# mu_misc=torch.tensor(priors['mu_misc'],dtype=torch.float32).to(device)
# sigma_misc=torch.tensor(priors['sigma_misc'],dtype=torch.float32).to(device)
# mu_beta0_heat=torch.tensor(priors['mu_beta0_heat'],dtype=torch.float32).to(device)
# sigma_beta0_heat=torch.tensor(priors['sigma_beta0_heat'],dtype=torch.float32).to(device)
# mu_beta1_heat=torch.tensor(priors['mu_beta1_heat'],dtype=torch.float32).to(device)
# sigma_beta1_heat=torch.tensor(priors['sigma_beta1_heat'],dtype=torch.float32).to(device)
# sigma_heat=torch.tensor(priors['sigma_heat'],dtype=torch.float32).to(device)
# mu_heat_on=torch.tensor(priors['mu_heat_on'],dtype=torch.float32).to(device)
# sigma_heat_on=torch.tensor(priors['sigma_heat_on'],dtype=torch.float32).to(device)
# mu_heat_off=torch.tensor(priors['mu_heat_off'],dtype=torch.float32).to(device)
# sigma_heat_off=torch.tensor(priors['sigma_heat_off'],dtype=torch.float32).to(device)
# mu_beta0_cool=torch.tensor(priors['mu_beta0_cool'],dtype=torch.float32).to(device)
# sigma_beta0_cool=torch.tensor(priors['sigma_beta0_cool'],dtype=torch.float32).to(device)
# mu_beta1_cool=torch.tensor(priors['mu_beta1_cool'],dtype=torch.float32).to(device)
# sigma_beta1_cool=torch.tensor(priors['sigma_beta1_cool'],dtype=torch.float32).to(device)
# sigma_cool=torch.tensor(priors['sigma_cool'],dtype=torch.float32).to(device)
# mu_cool_on=torch.tensor(priors['mu_cool_on'],dtype=torch.float32).to(device)
# sigma_cool_on=torch.tensor(priors['sigma_cool_on'],dtype=torch.float32).to(device)
# mu_cool_off=torch.tensor(priors['mu_cool_off'],dtype=torch.float32).to(device)
# sigma_cool_off=torch.tensor(priors['sigma_cool_off'],dtype=torch.float32).to(device)
# mu_aux=torch.tensor(priors['mu_aux'],dtype=torch.float32).to(device)
# sigma_aux=torch.tensor(priors['sigma_aux'],dtype=torch.float32).to(device)
# mu_aux_on=torch.tensor(priors['mu_aux_on'],dtype=torch.float32).to(device)
# sigma_aux_on=torch.tensor(priors['sigma_aux_on'],dtype=torch.float32).to(device)
# mu_aux_off=torch.tensor(priors['mu_aux_off'],dtype=torch.float32).to(device)
# sigma_aux_off=torch.tensor(priors['sigma_aux_off'],dtype=torch.float32).to(device)
# #mu_phi_df=torch.tensor(priors['mu_phi_df'],dtype=torch.float32).to(device)
# #sigma_phi_df=torch.tensor(priors['sigma_phi_df'],dtype=torch.float32).to(device)
# #mu_psi=torch.tensor(priors['mu_psi'],dtype=torch.float32).to(device)
# #sigma_psi=torch.tensor(priors['sigma_psi'],dtype=torch.float32).to(device)
# mu_sigma_net=torch.tensor(priors['mu_sigma_net'],dtype=torch.float32).to(device)
# sigma_sigma_net=torch.tensor(priors['sigma_sigma_net'],dtype=torch.float32).to(device)
# E_misc=pyro.sample("E_misc",dist.LogNormal(mu_misc,sigma_misc).to_event(1))
# # here mu_heat is not real scale. E_heat~LogNormal(mu_heat,sigma_heat)
# beta0_heat=pyro.sample("beta0_heat",dist.Normal(mu_beta0_heat,sigma_beta0_heat).to_event(1))
# beta1_heat=pyro.sample("beta1_heat",dist.LogNormal(mu_beta1_heat,sigma_beta1_heat).to_event(1))
# mu_heat=pyro.deterministic("mu_heat",beta0_heat+beta1_heat*t_out)
# E_heat=pyro.sample("E_heat",dist.LogNormal(mu_heat,sigma_heat).to_event(1))
# #print(f"E_heat shape is {E_heat.shape}")
# beta0_cool=pyro.sample("beta0_cool",dist.Normal(mu_beta0_cool,sigma_beta0_cool).to_event(1))
# beta1_cool=pyro.sample("beta1_cool",dist.LogNormal(mu_beta1_cool,sigma_beta1_cool).to_event(1))
# mu_cool=pyro.deterministic("mu_cool",beta0_cool+beta1_cool*t_out)
# E_cool=pyro.sample("E_cool",dist.LogNormal(mu_cool,sigma_cool).to_event(1))
# E_aux=pyro.sample("E_aux",dist.LogNormal(mu_aux,sigma_aux).to_event(1))
# # phi_df=pyro.sample("phi_df",dist.Normal(mu_phi_df,sigma_phi_df).to_event(1))
# # mu_psi_alpha,mu_psi_beta=self.calculate_concentration(mu=mu_psi,sigma=sigma_psi)
# # psi=pyro.sample("psi",dist.Beta(concentration1=mu_psi_alpha ,concentration0=mu_psi_beta).to_event(1))
# eta_heat=i_heat.clone()
# mu_heat_on_alpha,mu_heat_on_beta=self.calculate_concentration(mu=mu_heat_on,sigma=sigma_heat_on)
# mu_heat_off_alpha,mu_heat_off_beta=self.calculate_concentration(mu=mu_heat_off,sigma=sigma_heat_off)
# # print(f'mu_heat_on_alpha is {mu_heat_on_alpha}')
# # print(f'mu_heat_on_beta is {mu_heat_on_beta}')
# eta_heat_on=pyro.sample("eta_heat_on",dist.Beta(concentration1=mu_heat_on_alpha ,concentration0=mu_heat_on_beta).to_event(1))
# eta_heat_off=pyro.sample("eta_heat_off",dist.Beta(concentration1=mu_heat_off_alpha ,concentration0=mu_heat_off_beta).to_event(1))
# eta_heat[i_heat_on==1]=eta_heat_on#[i_heat_on==1]
# eta_heat[i_heat_off==1]=eta_heat_off#[i_heat_off==1]
# eta_cool=i_cool.clone()
# mu_cool_on_alpha,mu_cool_on_beta=self.calculate_concentration(mu=mu_cool_on,sigma=sigma_cool_on)
# mu_cool_off_alpha,mu_cool_off_beta=self.calculate_concentration(mu=mu_cool_off,sigma=sigma_cool_off)
# eta_cool_on=pyro.sample("eta_cool_on",dist.Beta(concentration1=mu_cool_on_alpha ,concentration0=mu_cool_on_beta).to_event(1))
# eta_cool_off=pyro.sample("eta_cool_off",dist.Beta(concentration1=mu_cool_off_alpha ,concentration0=mu_cool_off_beta).to_event(1))
# eta_cool[i_cool_on==1]=eta_cool_on#[i_cool_on==1]
# eta_cool[i_cool_off==1]=eta_cool_off#[i_cool_off==1]
# eta_aux=i_aux.clone()
# mu_aux_on_alpha,mu_aux_on_beta=self.calculate_concentration(mu=mu_aux_on,sigma=sigma_aux_on)
# mu_aux_off_alpha,mu_aux_off_beta=self.calculate_concentration(mu=mu_aux_off,sigma=sigma_aux_off)
# eta_aux_on=pyro.sample("eta_aux_on",dist.Beta(concentration1=mu_aux_on_alpha ,concentration0=mu_aux_on_beta).to_event(1))
# eta_aux_off=pyro.sample("eta_aux_off",dist.Beta(concentration1=mu_aux_off_alpha ,concentration0=mu_aux_off_beta).to_event(1))
# eta_aux[i_aux_on==1]=eta_aux_on#[i_aux_on==1]
# eta_aux[i_aux_off==1]=eta_aux_off#[i_aux_off==1]
# #i_df=torch.zeros_like(i_heat).to(device)
# # https://pytorch.org/docs/stable/distributions.html#torch.distributions.beta.Beta.concentration1
# # concentration1 (float or Tensor) – 1st concentration parameter of the distribution (often referred to as alpha)
# # concentration0 (float or Tensor) – 2nd concentration parameter of the distribution (often referred to as beta)
# #with pyro.plate("Emisc", size=t_out.shape[0]):
# #i_df_on=pyro.sample("i_df_on",dist.Binomial(total_count=1,probs=psi))
# #i_df=torch.where((i_heat==torch.tensor(1,dtype=torch.float32))&(t_out<phi_df),i_df_on,i_df)
# y_nan=torch.any(torch.cat([torch.isnan(i_heat)[:,None],
# torch.isnan(i_cool)[:,None],
# torch.isnan(i_aux)[:,None],
# torch.isnan(t_out)[:,None],
# torch.isnan(y_net)[:,None]
# ],dim=1),axis=1)
# #print(f'y_nan is {y_nan}')
# # print(f'eta_heat is {eta_heat}')
# # print(f'i_heat is {i_heat}')
# mu_net_=eta_heat*i_heat*E_heat+eta_cool*i_cool*E_cool+(eta_aux*i_aux+i_df)*E_aux+E_misc
# mu_net=pyro.deterministic("mu_net",mu_net_[~y_nan])
# sigma_net = pyro.sample("sigma_t_unit", dist.LogNormal(mu_sigma_net,sigma_sigma_net).to_event(1))
# #print(f"sigma_net is {sigma_net}")
# y_net_=y_net.flatten()[~y_nan]
# with pyro.plate("data", size=mu_net.shape[0]):
# obs_net=pyro.sample("obs_net", dist.Normal(mu_net, sigma_net).to_event(1), obs=y_net_.flatten()) # .to_event(1)
# return mu_net,priors
# def guide(self, y_net, t_out,
# i_heat,i_heat_on,i_heat_off,
# i_cool,i_cool_on,i_cool_off,
# i_aux,i_aux_on,i_aux_off,
# priors=None):
# self.batch_sz=t_out.shape[0]
# device=t_out.device
# if priors is None:
# # add noise for priors
# add_noise=1
# noise_scale=0.001
# noise_mean=0
# priors={
# "mu_misc":np.array([-3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_misc~logN(-3,2.5) [0.0004,0.05,6.783]
# "sigma_misc":np.array([3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_beta0_heat":np.array([-2.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
# "sigma_beta0_heat":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
# "mu_beta1_heat":np.array([-2.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
# "sigma_beta1_heat":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
# "sigma_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_heat~logN(-1.2,0.6) [0.093,0.30,1.0]
# "mu_heat_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_on~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
# "sigma_heat_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_heat_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_off~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
# "sigma_heat_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_beta0_cool":np.array([-2.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
# "sigma_beta0_cool":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
# "mu_beta1_cool":np.array([-2.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
# "sigma_beta1_cool":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
# "sigma_cool":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_cool~logN(-1.2,0.6) [0.093,0.30,1.0]
# "mu_cool_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_cool_on~Beta(mu_cool_on=0.5,sigma_cool_on=1/12) # 0~1 flat
# "sigma_cool_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_cool_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_cool_off~Beta(mu_cool_on=0.5,sigma_cool_on=1/12) # 0~1 flat
# "sigma_cool_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_aux":np.array([-0.4])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_aux~logN(-0.4,0.4) [0.3,0.67,1.43]
# "sigma_aux":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_aux_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_aux_on~Beta(mu_aux_on=0.5,sigma_aux_on=1/12) # 0~1 flat
# "sigma_aux_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_aux_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_aux_off~Beta(mu_aux_on=0.5,sigma_aux_on=1/12) # 0~1 flat
# "sigma_aux_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_phi_df":np.array([-1/3])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # phi~N(-1/3,1/6) give [-2/3,-1/3,0] which is [-10,0,10] in real scale
# "sigma_phi_df":np.array([1/6])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_psi":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # psi~Beta(mu_psi=0.5,sigma_psi=1/12) # 0~1 flat
# "sigma_psi":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
# "mu_sigma_net":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
# "sigma_sigma_net":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise #
# }
# ################### params ########################33
# mu_misc=pyro.param("mu_misc",torch.tensor(priors['mu_misc'],dtype=torch.float32).to(device))
# sigma_misc=pyro.param("sigma_misc",torch.tensor(priors['sigma_misc'],dtype=torch.float32).to(device),constraints.positive)
# mu_beta0_heat=pyro.param("mu_beta0_heat",torch.tensor(priors['mu_beta0_heat'],dtype=torch.float32).to(device))
# sigma_beta0_heat=pyro.param("sigma_beta0_heat",torch.tensor(priors['sigma_beta0_heat'],dtype=torch.float32).to(device),constraints.positive)
# mu_beta1_heat=pyro.param("mu_beta1_heat",torch.tensor(priors['mu_beta1_heat'],dtype=torch.float32).to(device))
# sigma_beta1_heat=pyro.param("sigma_beta1_heat",torch.tensor(priors['sigma_beta1_heat'],dtype=torch.float32).to(device),constraints.positive)
# sigma_heat=pyro.param("sigma_heat",torch.tensor(priors['sigma_heat'],dtype=torch.float32).to(device),constraints.positive)
# mu_heat_on=pyro.param("mu_heat_on",torch.tensor(priors['mu_heat_on'],dtype=torch.float32).to(device))
# sigma_heat_on=pyro.param("sigma_heat_on",torch.tensor(priors['sigma_heat_on'],dtype=torch.float32).to(device),constraints.positive)
# mu_heat_off=pyro.param("mu_heat_off",torch.tensor(priors['mu_heat_off'],dtype=torch.float32).to(device))
# sigma_heat_off=pyro.param("sigma_heat_off",torch.tensor(priors['sigma_heat_off'],dtype=torch.float32).to(device),constraints.positive)
# mu_beta0_cool=pyro.param("mu_beta0_cool",torch.tensor(priors['mu_beta0_cool'],dtype=torch.float32).to(device))
# sigma_beta0_cool=pyro.param("sigma_beta0_cool",torch.tensor(priors['sigma_beta0_cool'],dtype=torch.float32).to(device),constraints.positive)
# mu_beta1_cool=pyro.param("mu_beta1_cool",torch.tensor(priors['mu_beta1_cool'],dtype=torch.float32).to(device))
# sigma_beta1_cool=pyro.param("sigma_beta1_cool",torch.tensor(priors['sigma_beta1_cool'],dtype=torch.float32).to(device),constraints.positive)
# sigma_cool=pyro.param("sigma_cool",torch.tensor(priors['sigma_cool'],dtype=torch.float32).to(device),constraints.positive)
# mu_cool_on=pyro.param("mu_cool_on",torch.tensor(priors['mu_cool_on'],dtype=torch.float32).to(device))
# sigma_cool_on=pyro.param("sigma_cool_on",torch.tensor(priors['sigma_cool_on'],dtype=torch.float32).to(device),constraints.positive)
# mu_cool_off=pyro.param("mu_cool_off",torch.tensor(priors['mu_cool_off'],dtype=torch.float32).to(device))
# sigma_cool_off=pyro.param("sigma_cool_off",torch.tensor(priors['sigma_cool_off'],dtype=torch.float32).to(device),constraints.positive)
# mu_aux=pyro.param("mu_aux",torch.tensor(priors['mu_aux'],dtype=torch.float32).to(device))
# sigma_aux=pyro.param("sigma_aux",torch.tensor(priors['sigma_aux'],dtype=torch.float32).to(device),constraints.positive)
# mu_aux_on=pyro.param("mu_aux_on",torch.tensor(priors['mu_aux_on'],dtype=torch.float32).to(device))
# sigma_aux_on=pyro.param("sigma_aux_on",torch.tensor(priors['sigma_aux_on'],dtype=torch.float32).to(device),constraints.positive)
# mu_aux_off=pyro.param("mu_aux_off",torch.tensor(priors['mu_aux_off'],dtype=torch.float32).to(device))
# sigma_aux_off=pyro.param("sigma_aux_off",torch.tensor(priors['sigma_aux_off'],dtype=torch.float32).to(device),constraints.positive)
# mu_phi_df=pyro.param("mu_phi_df",torch.tensor(priors['mu_phi_df'],dtype=torch.float32).to(device))
# sigma_phi_df=pyro.param("sigma_phi_df",torch.tensor(priors['sigma_phi_df'],dtype=torch.float32).to(device),constraints.positive)
# mu_psi=pyro.param("mu_psi",torch.tensor(priors['mu_psi'],dtype=torch.float32).to(device))
# sigma_psi=pyro.param("sigma_psi",torch.tensor(priors['sigma_psi'],dtype=torch.float32).to(device),constraints.positive)
# mu_sigma_net=pyro.param("mu_sigma_net",torch.tensor(priors['mu_sigma_net'],dtype=torch.float32).to(device))
# sigma_sigma_net=pyro.param("sigma_sigma_net",torch.tensor(priors['sigma_sigma_net'],dtype=torch.float32).to(device),constraints.positive)
# ########33# samples
# E_misc=pyro.sample("E_misc",dist.LogNormal(mu_misc,sigma_misc).to_event(1))
# #print(E_misc.dtype)
# # here mu_heat is not real scale. E_heat~LogNormal(mu_heat,sigma_heat)
# # here mu_heat is not real scale. E_heat~LogNormal(mu_heat,sigma_heat)
# beta0_heat=pyro.sample("beta0_heat",dist.Normal(mu_beta0_heat,sigma_beta0_heat).to_event(1))
# beta1_heat=pyro.sample("beta1_heat",dist.LogNormal(mu_beta1_heat,sigma_beta1_heat).to_event(1))
# mu_heat=pyro.deterministic("mu_heat",beta0_heat+beta1_heat*t_out)
# E_heat=pyro.sample("E_heat",dist.LogNormal(mu_heat,sigma_heat).to_event(1))
# #print(f"E_heat shape is {E_heat.shape}")
# beta0_cool=pyro.sample("beta0_cool",dist.Normal(mu_beta0_cool,sigma_beta0_cool).to_event(1))
# beta1_cool=pyro.sample("beta1_cool",dist.LogNormal(mu_beta1_cool,sigma_beta1_cool).to_event(1))
# mu_cool=pyro.deterministic("mu_cool",beta0_cool+beta1_cool*t_out)
# E_cool=pyro.sample("E_cool",dist.LogNormal(mu_cool,sigma_cool).to_event(1))
# E_aux=pyro.sample("E_aux",dist.LogNormal(mu_aux,sigma_aux).to_event(1))
# phi_df=pyro.sample("phi_df",dist.Normal(mu_phi_df,sigma_phi_df).to_event(1))
# mu_psi_alpha,mu_psi_beta=self.calculate_concentration(mu=mu_psi,sigma=sigma_psi)
# psi=pyro.sample("psi",dist.Beta(concentration1=mu_psi_alpha ,concentration0=mu_psi_beta).to_event(1))
# eta_heat=i_heat.clone()
# mu_heat_on_alpha,mu_heat_on_beta=self.calculate_concentration(mu=mu_heat_on,sigma=sigma_heat_on)
# mu_heat_off_alpha,mu_heat_off_beta=self.calculate_concentration(mu=mu_heat_off,sigma=sigma_heat_off)
# # print(f'mu_heat_on_alpha is {mu_heat_on_alpha}')
# # print(f'mu_heat_on_beta is {mu_heat_on_beta}')
# eta_heat_on=pyro.sample("eta_heat_on",dist.Beta(concentration1=mu_heat_on_alpha ,concentration0=mu_heat_on_beta).to_event(1))
# eta_heat_off=pyro.sample("eta_heat_off",dist.Beta(concentration1=mu_heat_off_alpha ,concentration0=mu_heat_off_beta).to_event(1))
# eta_heat[i_heat_on==1]=eta_heat_on#[i_heat_on==1]
# eta_heat[i_heat_off==1]=eta_heat_off#[i_heat_off==1]
# eta_cool=i_cool.clone()
# mu_cool_on_alpha,mu_cool_on_beta=self.calculate_concentration(mu=mu_cool_on,sigma=sigma_cool_on)
# mu_cool_off_alpha,mu_cool_off_beta=self.calculate_concentration(mu=mu_cool_off,sigma=sigma_cool_off)
# eta_cool_on=pyro.sample("eta_cool_on",dist.Beta(concentration1=mu_cool_on_alpha ,concentration0=mu_cool_on_beta).to_event(1))
# eta_cool_off=pyro.sample("eta_cool_off",dist.Beta(concentration1=mu_cool_off_alpha ,concentration0=mu_cool_off_beta).to_event(1))
# eta_cool[i_cool_on==1]=eta_cool_on#[i_cool_on==1]
# eta_cool[i_cool_off==1]=eta_cool_off#[i_cool_off==1]
# eta_aux=i_aux.clone()
# mu_aux_on_alpha,mu_aux_on_beta=self.calculate_concentration(mu=mu_aux_on,sigma=sigma_aux_on)
# mu_aux_off_alpha,mu_aux_off_beta=self.calculate_concentration(mu=mu_aux_off,sigma=sigma_aux_off)
# eta_aux_on=pyro.sample("eta_aux_on",dist.Beta(concentration1=mu_aux_on_alpha ,concentration0=mu_aux_on_beta).to_event(1))
# eta_aux_off=pyro.sample("eta_aux_off",dist.Beta(concentration1=mu_aux_off_alpha ,concentration0=mu_aux_off_beta).to_event(1))
# eta_aux[i_aux_on==1]=eta_aux_on#[i_aux_on==1]
# eta_aux[i_aux_off==1]=eta_aux_off#[i_aux_off==1]
# i_df=torch.zeros_like(i_heat).to(device)
# # https://pytorch.org/docs/stable/distributions.html#torch.distributions.beta.Beta.concentration1
# # concentration1 (float or Tensor) – 1st concentration parameter of the distribution (often referred to as alpha)
# # concentration0 (float or Tensor) – 2nd concentration parameter of the distribution (often referred to as beta)
# #with pyro.plate("Emisc", size=t_out.shape[0]):
# #i_df_on=pyro.sample("i_df_on",dist.Binomial(total_count=1,probs=psi))
# #i_df=torch.where((i_heat==torch.tensor(1,dtype=torch.float32))&(t_out<phi_df),i_df_on,i_df)
# y_nan=torch.any(torch.cat([torch.isnan(i_heat)[:,None],
# torch.isnan(i_cool)[:,None],
# torch.isnan(i_aux)[:,None],
# torch.isnan(t_out)[:,None],
# torch.isnan(y_net)[:,None]
# ],dim=1),axis=1)
# #print(f'y_nan is {y_nan}')
# # print(f'eta_heat is {eta_heat}')
# # print(f'i_heat is {i_heat}')
# mu_net_=eta_heat*i_heat*E_heat+eta_cool*i_cool*E_cool+(eta_aux*i_aux+i_df)*E_aux+E_misc
# mu_net=pyro.deterministic("mu_net",mu_net_[~y_nan])
# sigma_net = pyro.sample("sigma_t_unit", dist.LogNormal(mu_sigma_net,sigma_sigma_net).to_event(1))
# #print(f"sigma_net is {sigma_net}")
# y_net_=y_net.flatten()[~y_nan]
# mu_misc=(-3.0*torch.ones(1)).to(device)
# sigma_misc=(2.5*torch.ones(1)).to(device)
# beta0_heat=(-2*torch.ones(1)).to(device) # t_out -1~1 slope probably - for heating. not large (or decide after plotting)
# beta1_heat=(-2*torch.ones(1)).to(device) # t_out -1~1 slope probably - for heating. not large (or decide after plotting)
# sigma_heat=(1.0*torch.ones(1)).to(device)
# mu_heat_on=(1*torch.ones(1)).to(device)
# sigma_heat_on=(0.25*torch.ones(1)).to(device)
# mu_heat_off=(1*torch.ones(1)).to(device)
# sigma_heat_off=(0.25*torch.ones(1)).to(device)
# beta0_cool=(-2*torch.ones(1)).to(device) # t_out -1~1 slope probably - for heating. not large (or decide after plotting)
# beta1_cool=(-2*torch.ones(1)).to(device) # t_out -1~1 slope probably - for heating. not large (or decide after plotting)
# sigma_cool=(1.0*torch.ones(1)).to(device)
# mu_cool_on=(1*torch.ones(1)).to(device)
# sigma_cool_on=(0.25*torch.ones(1)).to(device)
# mu_cool_off=(1*torch.ones(1)).to(device)
# sigma_cool_off=(0.25*torch.ones(1)).to(device)
# mu_aux=(-0.7*torch.ones(1)).to(device)
# sigma_aux=(0.6*torch.ones(1)).to(device)
# mu_aux_on=(1*torch.ones(1)).to(device)
# sigma_aux_on=(0.25*torch.ones(1)).to(device)
# mu_aux_off=(1*torch.ones(1)).to(device)
# sigma_aux_off=(0.25*torch.ones(1)).to(device)
# mu_phi_df=(0*torch.ones(1)).to(device)
# sigma_phi_df=(0*torch.ones(1)).to(device)
# mu_psi=(1*torch.ones(1)).to(device)
# sigma_psi=(0.25*torch.ones(1)).to(device)
# mu_sigma_net=(1*torch.ones(1)).to(device)
# sigma_sigma_net=(0.25*torch.ones(1)).to(device)
##########Guide
# mu_misc=pyro.param("mu_misc",(0.01*torch.randn(1)).to(device))
# sigma_misc=pyro.param("sigma_misc",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_beta0_heat=pyro.param("mu_beta0_heat",(0.01*torch.randn(1)).to(device))
# sigma_beta0_heat=pyro.param("sigma_beta0_heat",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_beta1_heat=pyro.param("mu_beta1_heat",(0.01*torch.randn(1)).to(device))
# sigma_beta1_heat=pyro.param("sigma_beta1_heat",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# sigma_heat=pyro.param("sigma_heat",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_heat_on=pyro.param("mu_heat_on",(0.01*torch.randn(1)).to(device))
# sigma_heat_on=pyro.param("sigma_heat_on",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_heat_off=pyro.param("mu_heat_off",(0.01*torch.randn(1)).to(device))
# sigma_heat_off=pyro.param("sigma_heat_off",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_beta0_cool=pyro.param("mu_beta0_cool",(0.01*torch.randn(1)).to(device))
# sigma_beta0_cool=pyro.param("sigma_beta0_cool",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_beta1_cool=pyro.param("mu_beta1_cool",(0.01*torch.randn(1)).to(device))
# sigma_beta1_cool=pyro.param("sigma_beta1_cool",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# sigma_cool=pyro.param("sigma_cool",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_cool_on=pyro.param("mu_cool_on",(0.01*torch.randn(1)).to(device))
# sigma_cool_on=pyro.param("sigma_cool_on",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_cool_off=pyro.param("mu_cool_off",(0.01*torch.randn(1)).to(device))
# sigma_cool_off=pyro.param("sigma_cool_off",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_aux=pyro.param("mu_aux",(0.01*torch.randn(1)).to(device))
# sigma_aux=pyro.param("sigma_aux",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_aux_on=pyro.param("mu_aux_on",(0.01*torch.randn(1)).to(device))
# sigma_aux_on=pyro.param("sigma_aux_on",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_aux_off=pyro.param("mu_aux_off",(0.01*torch.randn(1)).to(device))
# sigma_aux_off=pyro.param("sigma_aux_off",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_phi_df=pyro.param("mu_phi_df",(0.01*torch.randn(1)).to(device))
# sigma_phi_df=pyro.param("sigma_phi_df",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_psi=pyro.param("mu_psi",(0.01*torch.randn(1)).to(device))
# sigma_psi=pyro.param("sigma_psi",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
# mu_sigma_net=pyro.param("mu_sigma_net",(0.01*torch.randn(1)).to(device))
# sigma_sigma_net=pyro.param("sigma_sigma_net",(0.05*torch.abs(torch.randn(1))).to(device),constraints.positive)
| 63.007952
| 249
| 0.66551
| 10,418
| 63,386
| 3.748992
| 0.024957
| 0.042809
| 0.059196
| 0.065187
| 0.969276
| 0.968124
| 0.958932
| 0.951584
| 0.937194
| 0.929436
| 0
| 0.039908
| 0.172593
| 63,386
| 1,006
| 250
| 63.007952
| 0.704649
| 0.585603
| 0
| 0.76129
| 0
| 0
| 0.07658
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0
| 0.077419
| 0
| 0.106452
| 0.006452
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e8df8b1bd39e9d705842b0205549fdaaeddb357e
| 100
|
py
|
Python
|
pvops/__init__.py
|
bfemery-sandia/pvOps
|
fcdf47443041b3deb70f675481a70e7cf0b3dc93
|
[
"BSD-3-Clause"
] | 2
|
2021-04-21T23:42:36.000Z
|
2021-05-06T16:18:48.000Z
|
pvops/__init__.py
|
bfemery-sandia/pvOps
|
fcdf47443041b3deb70f675481a70e7cf0b3dc93
|
[
"BSD-3-Clause"
] | 13
|
2021-03-16T17:52:31.000Z
|
2021-05-20T21:19:56.000Z
|
pvops/__init__.py
|
bfemery-sandia/pvOps
|
fcdf47443041b3deb70f675481a70e7cf0b3dc93
|
[
"BSD-3-Clause"
] | 4
|
2021-05-26T13:49:21.000Z
|
2021-12-17T16:35:06.000Z
|
from pvops import text
from pvops import text2time
from pvops import timeseries
from pvops import iv
| 25
| 28
| 0.85
| 16
| 100
| 5.3125
| 0.4375
| 0.423529
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 0.15
| 100
| 4
| 29
| 25
| 0.988235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
33287115f38bf18bf458155b52f2d2ebe5e40800
| 62,741
|
py
|
Python
|
sdk/python/pulumi_azure/frontdoor/frontdoor.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/frontdoor/frontdoor.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/frontdoor/frontdoor.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['FrontdoorArgs', 'Frontdoor']
@pulumi.input_type
class FrontdoorArgs:
def __init__(__self__, *,
backend_pool_health_probes: pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolHealthProbeArgs']]],
backend_pool_load_balancings: pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolLoadBalancingArgs']]],
backend_pools: pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolArgs']]],
enforce_backend_pools_certificate_name_check: pulumi.Input[bool],
frontend_endpoints: pulumi.Input[Sequence[pulumi.Input['FrontdoorFrontendEndpointArgs']]],
resource_group_name: pulumi.Input[str],
routing_rules: pulumi.Input[Sequence[pulumi.Input['FrontdoorRoutingRuleArgs']]],
backend_pools_send_receive_timeout_seconds: Optional[pulumi.Input[int]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
load_balancer_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Frontdoor resource.
:param pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolHealthProbeArgs']]] backend_pool_health_probes: A `backend_pool_health_probe` block as defined below.
:param pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolLoadBalancingArgs']]] backend_pool_load_balancings: A `backend_pool_load_balancing` block as defined below.
:param pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolArgs']]] backend_pools: A `backend_pool` block as defined below.
:param pulumi.Input[bool] enforce_backend_pools_certificate_name_check: Enforce certificate name check on `HTTPS` requests to all backend pools, this setting will have no effect on `HTTP` requests. Permitted values are `true` or `false`.
:param pulumi.Input[Sequence[pulumi.Input['FrontdoorFrontendEndpointArgs']]] frontend_endpoints: A `frontend_endpoint` block as defined below.
:param pulumi.Input[str] resource_group_name: Specifies the name of the Resource Group in which the Front Door service should exist. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input['FrontdoorRoutingRuleArgs']]] routing_rules: A `routing_rule` block as defined below.
:param pulumi.Input[int] backend_pools_send_receive_timeout_seconds: Specifies the send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and returns. Possible values are between `0` - `240`. Defaults to `60`.
:param pulumi.Input[str] friendly_name: A friendly name for the Front Door service.
:param pulumi.Input[bool] load_balancer_enabled: Should the Front Door Load Balancer be Enabled? Defaults to `true`.
:param pulumi.Input[str] location: The `location` argument is deprecated and is now always set to `global`.
:param pulumi.Input[str] name: Specifies the name of the Front Door service. Must be globally unique. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "backend_pool_health_probes", backend_pool_health_probes)
pulumi.set(__self__, "backend_pool_load_balancings", backend_pool_load_balancings)
pulumi.set(__self__, "backend_pools", backend_pools)
pulumi.set(__self__, "enforce_backend_pools_certificate_name_check", enforce_backend_pools_certificate_name_check)
pulumi.set(__self__, "frontend_endpoints", frontend_endpoints)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "routing_rules", routing_rules)
if backend_pools_send_receive_timeout_seconds is not None:
pulumi.set(__self__, "backend_pools_send_receive_timeout_seconds", backend_pools_send_receive_timeout_seconds)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if load_balancer_enabled is not None:
pulumi.set(__self__, "load_balancer_enabled", load_balancer_enabled)
if location is not None:
warnings.warn("""Due to the service's API changing 'location' must now always be set to 'Global' for new resources, however if the Front Door service was created prior 2020/03/10 it may continue to exist in a specific current location""", DeprecationWarning)
pulumi.log.warn("""location is deprecated: Due to the service's API changing 'location' must now always be set to 'Global' for new resources, however if the Front Door service was created prior 2020/03/10 it may continue to exist in a specific current location""")
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="backendPoolHealthProbes")
def backend_pool_health_probes(self) -> pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolHealthProbeArgs']]]:
"""
A `backend_pool_health_probe` block as defined below.
"""
return pulumi.get(self, "backend_pool_health_probes")
@backend_pool_health_probes.setter
def backend_pool_health_probes(self, value: pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolHealthProbeArgs']]]):
pulumi.set(self, "backend_pool_health_probes", value)
@property
@pulumi.getter(name="backendPoolLoadBalancings")
def backend_pool_load_balancings(self) -> pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolLoadBalancingArgs']]]:
"""
A `backend_pool_load_balancing` block as defined below.
"""
return pulumi.get(self, "backend_pool_load_balancings")
@backend_pool_load_balancings.setter
def backend_pool_load_balancings(self, value: pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolLoadBalancingArgs']]]):
pulumi.set(self, "backend_pool_load_balancings", value)
@property
@pulumi.getter(name="backendPools")
def backend_pools(self) -> pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolArgs']]]:
"""
A `backend_pool` block as defined below.
"""
return pulumi.get(self, "backend_pools")
@backend_pools.setter
def backend_pools(self, value: pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolArgs']]]):
pulumi.set(self, "backend_pools", value)
@property
@pulumi.getter(name="enforceBackendPoolsCertificateNameCheck")
def enforce_backend_pools_certificate_name_check(self) -> pulumi.Input[bool]:
"""
Enforce certificate name check on `HTTPS` requests to all backend pools, this setting will have no effect on `HTTP` requests. Permitted values are `true` or `false`.
"""
return pulumi.get(self, "enforce_backend_pools_certificate_name_check")
@enforce_backend_pools_certificate_name_check.setter
def enforce_backend_pools_certificate_name_check(self, value: pulumi.Input[bool]):
pulumi.set(self, "enforce_backend_pools_certificate_name_check", value)
@property
@pulumi.getter(name="frontendEndpoints")
def frontend_endpoints(self) -> pulumi.Input[Sequence[pulumi.Input['FrontdoorFrontendEndpointArgs']]]:
"""
A `frontend_endpoint` block as defined below.
"""
return pulumi.get(self, "frontend_endpoints")
@frontend_endpoints.setter
def frontend_endpoints(self, value: pulumi.Input[Sequence[pulumi.Input['FrontdoorFrontendEndpointArgs']]]):
pulumi.set(self, "frontend_endpoints", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the Resource Group in which the Front Door service should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="routingRules")
def routing_rules(self) -> pulumi.Input[Sequence[pulumi.Input['FrontdoorRoutingRuleArgs']]]:
"""
A `routing_rule` block as defined below.
"""
return pulumi.get(self, "routing_rules")
@routing_rules.setter
def routing_rules(self, value: pulumi.Input[Sequence[pulumi.Input['FrontdoorRoutingRuleArgs']]]):
pulumi.set(self, "routing_rules", value)
@property
@pulumi.getter(name="backendPoolsSendReceiveTimeoutSeconds")
def backend_pools_send_receive_timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and returns. Possible values are between `0` - `240`. Defaults to `60`.
"""
return pulumi.get(self, "backend_pools_send_receive_timeout_seconds")
@backend_pools_send_receive_timeout_seconds.setter
def backend_pools_send_receive_timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "backend_pools_send_receive_timeout_seconds", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
A friendly name for the Front Door service.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter(name="loadBalancerEnabled")
def load_balancer_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Should the Front Door Load Balancer be Enabled? Defaults to `true`.
"""
return pulumi.get(self, "load_balancer_enabled")
@load_balancer_enabled.setter
def load_balancer_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "load_balancer_enabled", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The `location` argument is deprecated and is now always set to `global`.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Front Door service. Must be globally unique. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _FrontdoorState:
def __init__(__self__, *,
backend_pool_health_probes: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolHealthProbeArgs']]]] = None,
backend_pool_health_probes_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
backend_pool_load_balancing_settings_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
backend_pool_load_balancings: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolLoadBalancingArgs']]]] = None,
backend_pools: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolArgs']]]] = None,
backend_pools_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
backend_pools_send_receive_timeout_seconds: Optional[pulumi.Input[int]] = None,
cname: Optional[pulumi.Input[str]] = None,
enforce_backend_pools_certificate_name_check: Optional[pulumi.Input[bool]] = None,
explicit_resource_orders: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorExplicitResourceOrderArgs']]]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
frontend_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorFrontendEndpointArgs']]]] = None,
frontend_endpoints_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
header_frontdoor_id: Optional[pulumi.Input[str]] = None,
load_balancer_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_rules: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorRoutingRuleArgs']]]] = None,
routing_rules_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Frontdoor resources.
:param pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolHealthProbeArgs']]] backend_pool_health_probes: A `backend_pool_health_probe` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] backend_pool_health_probes_map: A map/dictionary of Backend Pool Health Probe Names (key) to the Backend Pool Health Probe ID (value)
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] backend_pool_load_balancing_settings_map: A map/dictionary of Backend Pool Load Balancing Setting Names (key) to the Backend Pool Load Balancing Setting ID (value)
:param pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolLoadBalancingArgs']]] backend_pool_load_balancings: A `backend_pool_load_balancing` block as defined below.
:param pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolArgs']]] backend_pools: A `backend_pool` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] backend_pools_map: A map/dictionary of Backend Pool Names (key) to the Backend Pool ID (value)
:param pulumi.Input[int] backend_pools_send_receive_timeout_seconds: Specifies the send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and returns. Possible values are between `0` - `240`. Defaults to `60`.
:param pulumi.Input[str] cname: The host that each frontendEndpoint must CNAME to.
:param pulumi.Input[bool] enforce_backend_pools_certificate_name_check: Enforce certificate name check on `HTTPS` requests to all backend pools, this setting will have no effect on `HTTP` requests. Permitted values are `true` or `false`.
:param pulumi.Input[str] friendly_name: A friendly name for the Front Door service.
:param pulumi.Input[Sequence[pulumi.Input['FrontdoorFrontendEndpointArgs']]] frontend_endpoints: A `frontend_endpoint` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] frontend_endpoints_map: The names of the `frontend_endpoint` blocks within this resource to associate with this `routing_rule`.
:param pulumi.Input[str] header_frontdoor_id: The unique ID of the Front Door which is embedded into the incoming headers `X-Azure-FDID` attribute and maybe used to filter traffic sent by the Front Door to your backend.
:param pulumi.Input[bool] load_balancer_enabled: Should the Front Door Load Balancer be Enabled? Defaults to `true`.
:param pulumi.Input[str] location: The `location` argument is deprecated and is now always set to `global`.
:param pulumi.Input[str] name: Specifies the name of the Front Door service. Must be globally unique. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: Specifies the name of the Resource Group in which the Front Door service should exist. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input['FrontdoorRoutingRuleArgs']]] routing_rules: A `routing_rule` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] routing_rules_map: A map/dictionary of Routing Rule Names (key) to the Routing Rule ID (value)
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if backend_pool_health_probes is not None:
pulumi.set(__self__, "backend_pool_health_probes", backend_pool_health_probes)
if backend_pool_health_probes_map is not None:
pulumi.set(__self__, "backend_pool_health_probes_map", backend_pool_health_probes_map)
if backend_pool_load_balancing_settings_map is not None:
pulumi.set(__self__, "backend_pool_load_balancing_settings_map", backend_pool_load_balancing_settings_map)
if backend_pool_load_balancings is not None:
pulumi.set(__self__, "backend_pool_load_balancings", backend_pool_load_balancings)
if backend_pools is not None:
pulumi.set(__self__, "backend_pools", backend_pools)
if backend_pools_map is not None:
pulumi.set(__self__, "backend_pools_map", backend_pools_map)
if backend_pools_send_receive_timeout_seconds is not None:
pulumi.set(__self__, "backend_pools_send_receive_timeout_seconds", backend_pools_send_receive_timeout_seconds)
if cname is not None:
pulumi.set(__self__, "cname", cname)
if enforce_backend_pools_certificate_name_check is not None:
pulumi.set(__self__, "enforce_backend_pools_certificate_name_check", enforce_backend_pools_certificate_name_check)
if explicit_resource_orders is not None:
pulumi.set(__self__, "explicit_resource_orders", explicit_resource_orders)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if frontend_endpoints is not None:
pulumi.set(__self__, "frontend_endpoints", frontend_endpoints)
if frontend_endpoints_map is not None:
pulumi.set(__self__, "frontend_endpoints_map", frontend_endpoints_map)
if header_frontdoor_id is not None:
pulumi.set(__self__, "header_frontdoor_id", header_frontdoor_id)
if load_balancer_enabled is not None:
pulumi.set(__self__, "load_balancer_enabled", load_balancer_enabled)
if location is not None:
warnings.warn("""Due to the service's API changing 'location' must now always be set to 'Global' for new resources, however if the Front Door service was created prior 2020/03/10 it may continue to exist in a specific current location""", DeprecationWarning)
pulumi.log.warn("""location is deprecated: Due to the service's API changing 'location' must now always be set to 'Global' for new resources, however if the Front Door service was created prior 2020/03/10 it may continue to exist in a specific current location""")
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if routing_rules is not None:
pulumi.set(__self__, "routing_rules", routing_rules)
if routing_rules_map is not None:
pulumi.set(__self__, "routing_rules_map", routing_rules_map)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="backendPoolHealthProbes")
def backend_pool_health_probes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolHealthProbeArgs']]]]:
"""
A `backend_pool_health_probe` block as defined below.
"""
return pulumi.get(self, "backend_pool_health_probes")
@backend_pool_health_probes.setter
def backend_pool_health_probes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolHealthProbeArgs']]]]):
pulumi.set(self, "backend_pool_health_probes", value)
@property
@pulumi.getter(name="backendPoolHealthProbesMap")
def backend_pool_health_probes_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map/dictionary of Backend Pool Health Probe Names (key) to the Backend Pool Health Probe ID (value)
"""
return pulumi.get(self, "backend_pool_health_probes_map")
@backend_pool_health_probes_map.setter
def backend_pool_health_probes_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "backend_pool_health_probes_map", value)
@property
@pulumi.getter(name="backendPoolLoadBalancingSettingsMap")
def backend_pool_load_balancing_settings_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map/dictionary of Backend Pool Load Balancing Setting Names (key) to the Backend Pool Load Balancing Setting ID (value)
"""
return pulumi.get(self, "backend_pool_load_balancing_settings_map")
@backend_pool_load_balancing_settings_map.setter
def backend_pool_load_balancing_settings_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "backend_pool_load_balancing_settings_map", value)
@property
@pulumi.getter(name="backendPoolLoadBalancings")
def backend_pool_load_balancings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolLoadBalancingArgs']]]]:
"""
A `backend_pool_load_balancing` block as defined below.
"""
return pulumi.get(self, "backend_pool_load_balancings")
@backend_pool_load_balancings.setter
def backend_pool_load_balancings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolLoadBalancingArgs']]]]):
pulumi.set(self, "backend_pool_load_balancings", value)
@property
@pulumi.getter(name="backendPools")
def backend_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolArgs']]]]:
"""
A `backend_pool` block as defined below.
"""
return pulumi.get(self, "backend_pools")
@backend_pools.setter
def backend_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorBackendPoolArgs']]]]):
pulumi.set(self, "backend_pools", value)
@property
@pulumi.getter(name="backendPoolsMap")
def backend_pools_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map/dictionary of Backend Pool Names (key) to the Backend Pool ID (value)
"""
return pulumi.get(self, "backend_pools_map")
@backend_pools_map.setter
def backend_pools_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "backend_pools_map", value)
@property
@pulumi.getter(name="backendPoolsSendReceiveTimeoutSeconds")
def backend_pools_send_receive_timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and returns. Possible values are between `0` - `240`. Defaults to `60`.
"""
return pulumi.get(self, "backend_pools_send_receive_timeout_seconds")
@backend_pools_send_receive_timeout_seconds.setter
def backend_pools_send_receive_timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "backend_pools_send_receive_timeout_seconds", value)
@property
@pulumi.getter
def cname(self) -> Optional[pulumi.Input[str]]:
"""
The host that each frontendEndpoint must CNAME to.
"""
return pulumi.get(self, "cname")
@cname.setter
def cname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cname", value)
@property
@pulumi.getter(name="enforceBackendPoolsCertificateNameCheck")
def enforce_backend_pools_certificate_name_check(self) -> Optional[pulumi.Input[bool]]:
"""
Enforce certificate name check on `HTTPS` requests to all backend pools, this setting will have no effect on `HTTP` requests. Permitted values are `true` or `false`.
"""
return pulumi.get(self, "enforce_backend_pools_certificate_name_check")
@enforce_backend_pools_certificate_name_check.setter
def enforce_backend_pools_certificate_name_check(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enforce_backend_pools_certificate_name_check", value)
@property
@pulumi.getter(name="explicitResourceOrders")
def explicit_resource_orders(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorExplicitResourceOrderArgs']]]]:
return pulumi.get(self, "explicit_resource_orders")
@explicit_resource_orders.setter
def explicit_resource_orders(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorExplicitResourceOrderArgs']]]]):
pulumi.set(self, "explicit_resource_orders", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
A friendly name for the Front Door service.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter(name="frontendEndpoints")
def frontend_endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorFrontendEndpointArgs']]]]:
"""
A `frontend_endpoint` block as defined below.
"""
return pulumi.get(self, "frontend_endpoints")
@frontend_endpoints.setter
def frontend_endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorFrontendEndpointArgs']]]]):
pulumi.set(self, "frontend_endpoints", value)
@property
@pulumi.getter(name="frontendEndpointsMap")
def frontend_endpoints_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The names of the `frontend_endpoint` blocks within this resource to associate with this `routing_rule`.
"""
return pulumi.get(self, "frontend_endpoints_map")
@frontend_endpoints_map.setter
def frontend_endpoints_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "frontend_endpoints_map", value)
@property
@pulumi.getter(name="headerFrontdoorId")
def header_frontdoor_id(self) -> Optional[pulumi.Input[str]]:
"""
The unique ID of the Front Door which is embedded into the incoming headers `X-Azure-FDID` attribute and maybe used to filter traffic sent by the Front Door to your backend.
"""
return pulumi.get(self, "header_frontdoor_id")
@header_frontdoor_id.setter
def header_frontdoor_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "header_frontdoor_id", value)
@property
@pulumi.getter(name="loadBalancerEnabled")
def load_balancer_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Should the Front Door Load Balancer be Enabled? Defaults to `true`.
"""
return pulumi.get(self, "load_balancer_enabled")
@load_balancer_enabled.setter
def load_balancer_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "load_balancer_enabled", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The `location` argument is deprecated and is now always set to `global`.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Front Door service. Must be globally unique. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Resource Group in which the Front Door service should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="routingRules")
def routing_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorRoutingRuleArgs']]]]:
"""
A `routing_rule` block as defined below.
"""
return pulumi.get(self, "routing_rules")
@routing_rules.setter
def routing_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FrontdoorRoutingRuleArgs']]]]):
pulumi.set(self, "routing_rules", value)
@property
@pulumi.getter(name="routingRulesMap")
def routing_rules_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map/dictionary of Routing Rule Names (key) to the Routing Rule ID (value)
"""
return pulumi.get(self, "routing_rules_map")
@routing_rules_map.setter
def routing_rules_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "routing_rules_map", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Frontdoor(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend_pool_health_probes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolHealthProbeArgs']]]]] = None,
backend_pool_load_balancings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolLoadBalancingArgs']]]]] = None,
backend_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolArgs']]]]] = None,
backend_pools_send_receive_timeout_seconds: Optional[pulumi.Input[int]] = None,
enforce_backend_pools_certificate_name_check: Optional[pulumi.Input[bool]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
frontend_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorFrontendEndpointArgs']]]]] = None,
load_balancer_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorRoutingRuleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages an Azure Front Door instance.
Azure Front Door Service is Microsoft's highly available and scalable web application acceleration platform and global HTTP(s) load balancer. It provides built-in DDoS protection and application layer security and caching. Front Door enables you to build applications that maximize and automate high-availability and performance for your end-users. Use Front Door with Azure services including Web/Mobile Apps, Cloud Services and Virtual Machines – or combine it with on-premises services for hybrid deployments and smooth cloud migration.
Below are some of the key scenarios that Azure Front Door Service addresses:
* Use Front Door to improve application scale and availability with instant multi-region failover
* Use Front Door to improve application performance with SSL offload and routing requests to the fastest available application backend.
* Use Front Door for application layer security and DDoS protection for your application.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_frontdoor = azure.frontdoor.Frontdoor("exampleFrontdoor",
resource_group_name=example_resource_group.name,
enforce_backend_pools_certificate_name_check=False,
routing_rules=[azure.frontdoor.FrontdoorRoutingRuleArgs(
name="exampleRoutingRule1",
accepted_protocols=[
"Http",
"Https",
],
patterns_to_matches=["/*"],
frontend_endpoints=["exampleFrontendEndpoint1"],
forwarding_configuration=azure.frontdoor.FrontdoorRoutingRuleForwardingConfigurationArgs(
forwarding_protocol="MatchRequest",
backend_pool_name="exampleBackendBing",
),
)],
backend_pool_load_balancings=[azure.frontdoor.FrontdoorBackendPoolLoadBalancingArgs(
name="exampleLoadBalancingSettings1",
)],
backend_pool_health_probes=[azure.frontdoor.FrontdoorBackendPoolHealthProbeArgs(
name="exampleHealthProbeSetting1",
)],
backend_pools=[azure.frontdoor.FrontdoorBackendPoolArgs(
name="exampleBackendBing",
backends=[azure.frontdoor.FrontdoorBackendPoolBackendArgs(
host_header="www.bing.com",
address="www.bing.com",
http_port=80,
https_port=443,
)],
load_balancing_name="exampleLoadBalancingSettings1",
health_probe_name="exampleHealthProbeSetting1",
)],
frontend_endpoints=[azure.frontdoor.FrontdoorFrontendEndpointArgs(
name="exampleFrontendEndpoint1",
host_name="example-FrontDoor.azurefd.net",
)])
```
## Import
Front Doors can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:frontdoor/frontdoor:Frontdoor example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/mygroup1/providers/Microsoft.Network/frontDoors/frontdoor1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolHealthProbeArgs']]]] backend_pool_health_probes: A `backend_pool_health_probe` block as defined below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolLoadBalancingArgs']]]] backend_pool_load_balancings: A `backend_pool_load_balancing` block as defined below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolArgs']]]] backend_pools: A `backend_pool` block as defined below.
:param pulumi.Input[int] backend_pools_send_receive_timeout_seconds: Specifies the send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and returns. Possible values are between `0` - `240`. Defaults to `60`.
:param pulumi.Input[bool] enforce_backend_pools_certificate_name_check: Enforce certificate name check on `HTTPS` requests to all backend pools, this setting will have no effect on `HTTP` requests. Permitted values are `true` or `false`.
:param pulumi.Input[str] friendly_name: A friendly name for the Front Door service.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorFrontendEndpointArgs']]]] frontend_endpoints: A `frontend_endpoint` block as defined below.
:param pulumi.Input[bool] load_balancer_enabled: Should the Front Door Load Balancer be Enabled? Defaults to `true`.
:param pulumi.Input[str] location: The `location` argument is deprecated and is now always set to `global`.
:param pulumi.Input[str] name: Specifies the name of the Front Door service. Must be globally unique. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: Specifies the name of the Resource Group in which the Front Door service should exist. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorRoutingRuleArgs']]]] routing_rules: A `routing_rule` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FrontdoorArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Azure Front Door instance.
Azure Front Door Service is Microsoft's highly available and scalable web application acceleration platform and global HTTP(s) load balancer. It provides built-in DDoS protection and application layer security and caching. Front Door enables you to build applications that maximize and automate high-availability and performance for your end-users. Use Front Door with Azure services including Web/Mobile Apps, Cloud Services and Virtual Machines – or combine it with on-premises services for hybrid deployments and smooth cloud migration.
Below are some of the key scenarios that Azure Front Door Service addresses:
* Use Front Door to improve application scale and availability with instant multi-region failover
* Use Front Door to improve application performance with SSL offload and routing requests to the fastest available application backend.
* Use Front Door for application layer security and DDoS protection for your application.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_frontdoor = azure.frontdoor.Frontdoor("exampleFrontdoor",
resource_group_name=example_resource_group.name,
enforce_backend_pools_certificate_name_check=False,
routing_rules=[azure.frontdoor.FrontdoorRoutingRuleArgs(
name="exampleRoutingRule1",
accepted_protocols=[
"Http",
"Https",
],
patterns_to_matches=["/*"],
frontend_endpoints=["exampleFrontendEndpoint1"],
forwarding_configuration=azure.frontdoor.FrontdoorRoutingRuleForwardingConfigurationArgs(
forwarding_protocol="MatchRequest",
backend_pool_name="exampleBackendBing",
),
)],
backend_pool_load_balancings=[azure.frontdoor.FrontdoorBackendPoolLoadBalancingArgs(
name="exampleLoadBalancingSettings1",
)],
backend_pool_health_probes=[azure.frontdoor.FrontdoorBackendPoolHealthProbeArgs(
name="exampleHealthProbeSetting1",
)],
backend_pools=[azure.frontdoor.FrontdoorBackendPoolArgs(
name="exampleBackendBing",
backends=[azure.frontdoor.FrontdoorBackendPoolBackendArgs(
host_header="www.bing.com",
address="www.bing.com",
http_port=80,
https_port=443,
)],
load_balancing_name="exampleLoadBalancingSettings1",
health_probe_name="exampleHealthProbeSetting1",
)],
frontend_endpoints=[azure.frontdoor.FrontdoorFrontendEndpointArgs(
name="exampleFrontendEndpoint1",
host_name="example-FrontDoor.azurefd.net",
)])
```
## Import
Front Doors can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:frontdoor/frontdoor:Frontdoor example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/mygroup1/providers/Microsoft.Network/frontDoors/frontdoor1
```
:param str resource_name: The name of the resource.
:param FrontdoorArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FrontdoorArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend_pool_health_probes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolHealthProbeArgs']]]]] = None,
backend_pool_load_balancings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolLoadBalancingArgs']]]]] = None,
backend_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolArgs']]]]] = None,
backend_pools_send_receive_timeout_seconds: Optional[pulumi.Input[int]] = None,
enforce_backend_pools_certificate_name_check: Optional[pulumi.Input[bool]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
frontend_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorFrontendEndpointArgs']]]]] = None,
load_balancer_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorRoutingRuleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FrontdoorArgs.__new__(FrontdoorArgs)
if backend_pool_health_probes is None and not opts.urn:
raise TypeError("Missing required property 'backend_pool_health_probes'")
__props__.__dict__["backend_pool_health_probes"] = backend_pool_health_probes
if backend_pool_load_balancings is None and not opts.urn:
raise TypeError("Missing required property 'backend_pool_load_balancings'")
__props__.__dict__["backend_pool_load_balancings"] = backend_pool_load_balancings
if backend_pools is None and not opts.urn:
raise TypeError("Missing required property 'backend_pools'")
__props__.__dict__["backend_pools"] = backend_pools
__props__.__dict__["backend_pools_send_receive_timeout_seconds"] = backend_pools_send_receive_timeout_seconds
if enforce_backend_pools_certificate_name_check is None and not opts.urn:
raise TypeError("Missing required property 'enforce_backend_pools_certificate_name_check'")
__props__.__dict__["enforce_backend_pools_certificate_name_check"] = enforce_backend_pools_certificate_name_check
__props__.__dict__["friendly_name"] = friendly_name
if frontend_endpoints is None and not opts.urn:
raise TypeError("Missing required property 'frontend_endpoints'")
__props__.__dict__["frontend_endpoints"] = frontend_endpoints
__props__.__dict__["load_balancer_enabled"] = load_balancer_enabled
if location is not None and not opts.urn:
warnings.warn("""Due to the service's API changing 'location' must now always be set to 'Global' for new resources, however if the Front Door service was created prior 2020/03/10 it may continue to exist in a specific current location""", DeprecationWarning)
pulumi.log.warn("""location is deprecated: Due to the service's API changing 'location' must now always be set to 'Global' for new resources, however if the Front Door service was created prior 2020/03/10 it may continue to exist in a specific current location""")
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if routing_rules is None and not opts.urn:
raise TypeError("Missing required property 'routing_rules'")
__props__.__dict__["routing_rules"] = routing_rules
__props__.__dict__["tags"] = tags
__props__.__dict__["backend_pool_health_probes_map"] = None
__props__.__dict__["backend_pool_load_balancing_settings_map"] = None
__props__.__dict__["backend_pools_map"] = None
__props__.__dict__["cname"] = None
__props__.__dict__["explicit_resource_orders"] = None
__props__.__dict__["frontend_endpoints_map"] = None
__props__.__dict__["header_frontdoor_id"] = None
__props__.__dict__["routing_rules_map"] = None
super(Frontdoor, __self__).__init__(
'azure:frontdoor/frontdoor:Frontdoor',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
backend_pool_health_probes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolHealthProbeArgs']]]]] = None,
backend_pool_health_probes_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
backend_pool_load_balancing_settings_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
backend_pool_load_balancings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolLoadBalancingArgs']]]]] = None,
backend_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolArgs']]]]] = None,
backend_pools_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
backend_pools_send_receive_timeout_seconds: Optional[pulumi.Input[int]] = None,
cname: Optional[pulumi.Input[str]] = None,
enforce_backend_pools_certificate_name_check: Optional[pulumi.Input[bool]] = None,
explicit_resource_orders: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorExplicitResourceOrderArgs']]]]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
frontend_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorFrontendEndpointArgs']]]]] = None,
frontend_endpoints_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
header_frontdoor_id: Optional[pulumi.Input[str]] = None,
load_balancer_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorRoutingRuleArgs']]]]] = None,
routing_rules_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Frontdoor':
"""
Get an existing Frontdoor resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolHealthProbeArgs']]]] backend_pool_health_probes: A `backend_pool_health_probe` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] backend_pool_health_probes_map: A map/dictionary of Backend Pool Health Probe Names (key) to the Backend Pool Health Probe ID (value)
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] backend_pool_load_balancing_settings_map: A map/dictionary of Backend Pool Load Balancing Setting Names (key) to the Backend Pool Load Balancing Setting ID (value)
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolLoadBalancingArgs']]]] backend_pool_load_balancings: A `backend_pool_load_balancing` block as defined below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorBackendPoolArgs']]]] backend_pools: A `backend_pool` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] backend_pools_map: A map/dictionary of Backend Pool Names (key) to the Backend Pool ID (value)
:param pulumi.Input[int] backend_pools_send_receive_timeout_seconds: Specifies the send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and returns. Possible values are between `0` - `240`. Defaults to `60`.
:param pulumi.Input[str] cname: The host that each frontendEndpoint must CNAME to.
:param pulumi.Input[bool] enforce_backend_pools_certificate_name_check: Enforce certificate name check on `HTTPS` requests to all backend pools, this setting will have no effect on `HTTP` requests. Permitted values are `true` or `false`.
:param pulumi.Input[str] friendly_name: A friendly name for the Front Door service.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorFrontendEndpointArgs']]]] frontend_endpoints: A `frontend_endpoint` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] frontend_endpoints_map: The names of the `frontend_endpoint` blocks within this resource to associate with this `routing_rule`.
:param pulumi.Input[str] header_frontdoor_id: The unique ID of the Front Door which is embedded into the incoming headers `X-Azure-FDID` attribute and maybe used to filter traffic sent by the Front Door to your backend.
:param pulumi.Input[bool] load_balancer_enabled: Should the Front Door Load Balancer be Enabled? Defaults to `true`.
:param pulumi.Input[str] location: The `location` argument is deprecated and is now always set to `global`.
:param pulumi.Input[str] name: Specifies the name of the Front Door service. Must be globally unique. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: Specifies the name of the Resource Group in which the Front Door service should exist. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontdoorRoutingRuleArgs']]]] routing_rules: A `routing_rule` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] routing_rules_map: A map/dictionary of Routing Rule Names (key) to the Routing Rule ID (value)
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FrontdoorState.__new__(_FrontdoorState)
__props__.__dict__["backend_pool_health_probes"] = backend_pool_health_probes
__props__.__dict__["backend_pool_health_probes_map"] = backend_pool_health_probes_map
__props__.__dict__["backend_pool_load_balancing_settings_map"] = backend_pool_load_balancing_settings_map
__props__.__dict__["backend_pool_load_balancings"] = backend_pool_load_balancings
__props__.__dict__["backend_pools"] = backend_pools
__props__.__dict__["backend_pools_map"] = backend_pools_map
__props__.__dict__["backend_pools_send_receive_timeout_seconds"] = backend_pools_send_receive_timeout_seconds
__props__.__dict__["cname"] = cname
__props__.__dict__["enforce_backend_pools_certificate_name_check"] = enforce_backend_pools_certificate_name_check
__props__.__dict__["explicit_resource_orders"] = explicit_resource_orders
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["frontend_endpoints"] = frontend_endpoints
__props__.__dict__["frontend_endpoints_map"] = frontend_endpoints_map
__props__.__dict__["header_frontdoor_id"] = header_frontdoor_id
__props__.__dict__["load_balancer_enabled"] = load_balancer_enabled
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["routing_rules"] = routing_rules
__props__.__dict__["routing_rules_map"] = routing_rules_map
__props__.__dict__["tags"] = tags
return Frontdoor(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backendPoolHealthProbes")
def backend_pool_health_probes(self) -> pulumi.Output[Sequence['outputs.FrontdoorBackendPoolHealthProbe']]:
"""
A `backend_pool_health_probe` block as defined below.
"""
return pulumi.get(self, "backend_pool_health_probes")
@property
@pulumi.getter(name="backendPoolHealthProbesMap")
def backend_pool_health_probes_map(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map/dictionary of Backend Pool Health Probe Names (key) to the Backend Pool Health Probe ID (value)
"""
return pulumi.get(self, "backend_pool_health_probes_map")
@property
@pulumi.getter(name="backendPoolLoadBalancingSettingsMap")
def backend_pool_load_balancing_settings_map(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map/dictionary of Backend Pool Load Balancing Setting Names (key) to the Backend Pool Load Balancing Setting ID (value)
"""
return pulumi.get(self, "backend_pool_load_balancing_settings_map")
@property
@pulumi.getter(name="backendPoolLoadBalancings")
def backend_pool_load_balancings(self) -> pulumi.Output[Sequence['outputs.FrontdoorBackendPoolLoadBalancing']]:
"""
A `backend_pool_load_balancing` block as defined below.
"""
return pulumi.get(self, "backend_pool_load_balancings")
@property
@pulumi.getter(name="backendPools")
def backend_pools(self) -> pulumi.Output[Sequence['outputs.FrontdoorBackendPool']]:
"""
A `backend_pool` block as defined below.
"""
return pulumi.get(self, "backend_pools")
@property
@pulumi.getter(name="backendPoolsMap")
def backend_pools_map(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map/dictionary of Backend Pool Names (key) to the Backend Pool ID (value)
"""
return pulumi.get(self, "backend_pools_map")
@property
@pulumi.getter(name="backendPoolsSendReceiveTimeoutSeconds")
def backend_pools_send_receive_timeout_seconds(self) -> pulumi.Output[Optional[int]]:
"""
Specifies the send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and returns. Possible values are between `0` - `240`. Defaults to `60`.
"""
return pulumi.get(self, "backend_pools_send_receive_timeout_seconds")
@property
@pulumi.getter
def cname(self) -> pulumi.Output[str]:
"""
The host that each frontendEndpoint must CNAME to.
"""
return pulumi.get(self, "cname")
@property
@pulumi.getter(name="enforceBackendPoolsCertificateNameCheck")
def enforce_backend_pools_certificate_name_check(self) -> pulumi.Output[bool]:
"""
Enforce certificate name check on `HTTPS` requests to all backend pools, this setting will have no effect on `HTTP` requests. Permitted values are `true` or `false`.
"""
return pulumi.get(self, "enforce_backend_pools_certificate_name_check")
@property
@pulumi.getter(name="explicitResourceOrders")
def explicit_resource_orders(self) -> pulumi.Output[Sequence['outputs.FrontdoorExplicitResourceOrder']]:
return pulumi.get(self, "explicit_resource_orders")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
A friendly name for the Front Door service.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="frontendEndpoints")
def frontend_endpoints(self) -> pulumi.Output[Sequence['outputs.FrontdoorFrontendEndpoint']]:
"""
A `frontend_endpoint` block as defined below.
"""
return pulumi.get(self, "frontend_endpoints")
@property
@pulumi.getter(name="frontendEndpointsMap")
def frontend_endpoints_map(self) -> pulumi.Output[Mapping[str, str]]:
"""
The names of the `frontend_endpoint` blocks within this resource to associate with this `routing_rule`.
"""
return pulumi.get(self, "frontend_endpoints_map")
@property
@pulumi.getter(name="headerFrontdoorId")
def header_frontdoor_id(self) -> pulumi.Output[str]:
"""
The unique ID of the Front Door which is embedded into the incoming headers `X-Azure-FDID` attribute and maybe used to filter traffic sent by the Front Door to your backend.
"""
return pulumi.get(self, "header_frontdoor_id")
@property
@pulumi.getter(name="loadBalancerEnabled")
def load_balancer_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Should the Front Door Load Balancer be Enabled? Defaults to `true`.
"""
return pulumi.get(self, "load_balancer_enabled")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The `location` argument is deprecated and is now always set to `global`.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Front Door service. Must be globally unique. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Resource Group in which the Front Door service should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="routingRules")
def routing_rules(self) -> pulumi.Output[Sequence['outputs.FrontdoorRoutingRule']]:
"""
A `routing_rule` block as defined below.
"""
return pulumi.get(self, "routing_rules")
@property
@pulumi.getter(name="routingRulesMap")
def routing_rules_map(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map/dictionary of Routing Rule Names (key) to the Routing Rule ID (value)
"""
return pulumi.get(self, "routing_rules_map")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
| 58.636449
| 547
| 0.70179
| 7,368
| 62,741
| 5.725977
| 0.051167
| 0.086302
| 0.057645
| 0.040887
| 0.950129
| 0.935433
| 0.914812
| 0.891725
| 0.865557
| 0.857783
| 0
| 0.003663
| 0.20379
| 62,741
| 1,069
| 548
| 58.6913
| 0.840837
| 0.351939
| 0
| 0.67517
| 1
| 0.010204
| 0.208534
| 0.119528
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163265
| false
| 0.001701
| 0.011905
| 0.003401
| 0.27551
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6865925dd5e6dcb20c21929fce4b569b7069cc4e
| 32,192
|
py
|
Python
|
tests/test_mpc.py
|
Juju-botu/mpc.pytorch
|
e81b27bdceb40828ac66bdabe6d8c86b111f73bd
|
[
"MIT"
] | 2
|
2021-01-29T09:24:47.000Z
|
2021-11-11T21:37:56.000Z
|
tests/test_mpc.py
|
Juju-botu/mpc.pytorch
|
e81b27bdceb40828ac66bdabe6d8c86b111f73bd
|
[
"MIT"
] | null | null | null |
tests/test_mpc.py
|
Juju-botu/mpc.pytorch
|
e81b27bdceb40828ac66bdabe6d8c86b111f73bd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Use for cloned repository only
# Remove when using installed mpc.pytorch
import sys; sys.path.append('..')
import torch
from torch.autograd import Function, Variable, grad
from torch.nn.parameter import Parameter
import numpy as np
import numpy.random as npr
import numpy.testing as npt
from numpy.testing import dec
import cvxpy as cp
import numdifftools as nd
import gc
import os
from mpc import mpc, util, pnqp
from mpc.dynamics import NNDynamics, AffineDynamics
from mpc.lqr_step import LQRStep
from mpc.mpc import GradMethods, QuadCost, LinDx
def lqr_qp_cp(C, c, lower, upper):
n = c.shape[0]
x = cp.Variable(n)
obj = 0.5*cp.quad_form(x, C) + cp.sum(cp.multiply(c, x))
cons = [lower <= x, x <= upper]
prob = cp.Problem(cp.Minimize(obj), cons)
prob.solve()
assert 'optimal' in prob.status
return np.array(x.value)
def lqr_cp(C, c, F, f, x_init, T, n_state, n_ctrl, u_lower, u_upper):
"""Solve min_{tau={x,u}} sum_t 0.5 tau_t^T C_t tau_t + c_t^T tau_t
s.t. x_{t+1} = A_t x_t + B_t u_t + f_t
x_0 = x_init
u_lower <= u <= u_upper
"""
tau = cp.Variable((n_state+n_ctrl, T))
assert (u_lower is None) == (u_upper is None)
objs = []
x0 = tau[:n_state,0]
u0 = tau[n_state:,0]
cons = [x0 == x_init]
for t in range(T):
xt = tau[:n_state,t]
ut = tau[n_state:,t]
objs.append(0.5*cp.quad_form(tau[:,t], C[t]) +
cp.sum(cp.multiply(c[t], tau[:,t])))
if u_lower is not None:
cons += [u_lower[t] <= ut, ut <= u_upper[t]]
if t+1 < T:
xtp1 = tau[:n_state, t+1]
cons.append(xtp1 == F[t]*tau[:,t]+f[t])
prob = cp.Problem(cp.Minimize(sum(objs)), cons)
# prob.solve(solver=cp.SCS, verbose=True)
prob.solve()
assert 'optimal' in prob.status
return np.array(tau.value), np.array([obj_t.value for obj_t in objs])
def test_lqr_qp():
npr.seed(1)
n_batch = 2
n = 100
C = npr.randn(n_batch, n, n)
C = np.matmul(C.transpose(0, 2, 1), C)
c = npr.randn(n_batch, n)
lower = -npr.random((n_batch, n))
upper = npr.random((n_batch, n))
opt_cp0 = lqr_qp_cp(C[0], c[0], lower[0], upper[0])
opt_cp1 = lqr_qp_cp(C[1], c[1], lower[1], upper[1])
C, c, lower, upper = [
torch.Tensor(x).double() if x is not None else None
for x in [C, c, lower, upper]
]
t = pnqp.pnqp(C, c, lower, upper)
opt_pnqp = t[0]
npt.assert_allclose(opt_cp0, opt_pnqp[0].numpy(), rtol=1e-3)
npt.assert_allclose(opt_cp1, opt_pnqp[1].numpy(), rtol=1e-3)
def test_lqr_linear_unbounded():
npr.seed(1)
n_batch = 2
n_state, n_ctrl = 3, 4
n_sc = n_state + n_ctrl
T = 5
C = npr.randn(T, n_batch, n_sc, n_sc)
C = np.matmul(C.transpose(0, 1, 3, 2), C)
c = npr.randn(T, n_batch, n_sc)
alpha = 0.2
R = np.tile(np.eye(n_state)+alpha*np.random.randn(n_state, n_state),
(T, n_batch, 1, 1))
S = np.tile(np.random.randn(n_state, n_ctrl), (T, n_batch, 1, 1))
F = np.concatenate((R, S), axis=3)
f = np.tile(npr.randn(n_state), (T, n_batch, 1))
x_init = npr.randn(n_batch, n_state)
# u_lower = -100.*npr.random((T, n_batch, n_ctrl))
# u_upper = 100.*npr.random((T, n_batch, n_ctrl))
u_lower = -1e4*np.ones((T, n_batch, n_ctrl))
u_upper = 1e4*np.ones((T, n_batch, n_ctrl))
tau_cp, objs_cp = lqr_cp(
C[:,0], c[:,0], F[:,0], f[:,0], x_init[0], T, n_state, n_ctrl,
None, None
)
tau_cp = tau_cp.T
x_cp = tau_cp[:,:n_state]
u_cp = tau_cp[:,n_state:]
C, c, R, S, F, f, x_init, u_lower, u_upper = [
Variable(torch.Tensor(x).double()) if x is not None else None
for x in [C, c, R, S, F, f, x_init, u_lower, u_upper]
]
dynamics = AffineDynamics(R[0,0], S[0,0], f[0,0])
u_lqr = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, u_lower, u_upper, u_lqr,
lqr_iter=10,
backprop=False,
verbose=1,
exit_unconverged=True,
)(x_init, QuadCost(C, c), dynamics)
tau_lqr = torch.cat((x_lqr, u_lqr), 2)
tau_lqr = util.get_data_maybe(tau_lqr)
npt.assert_allclose(tau_cp, tau_lqr[:,0].numpy(), rtol=1e-3)
u_lqr = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, None, None, u_lqr,
lqr_iter=10,
backprop=False,
exit_unconverged=False,
)(x_init, QuadCost(C, c), dynamics)
tau_lqr = torch.cat((x_lqr, u_lqr), 2)
tau_lqr = util.get_data_maybe(tau_lqr)
npt.assert_allclose(tau_cp, tau_lqr[:,0].numpy(), rtol=1e-3)
def test_lqr_linear_bounded():
npr.seed(1)
n_batch = 2
n_state, n_ctrl, T = 3, 4, 5
# n_state, n_ctrl, T = 50, 20, 30
n_sc = n_state + n_ctrl
C = npr.randn(T, n_batch, n_sc, n_sc)
C = np.matmul(C.transpose(0, 1, 3, 2), C)
c = npr.randn(T, n_batch, n_sc)
alpha = 0.2
R = np.tile(np.eye(n_state)+alpha*np.random.randn(n_state, n_state),
(T, n_batch, 1, 1))
S = np.tile(np.random.randn(n_state, n_ctrl), (T, n_batch, 1, 1))
F = np.concatenate((R, S), axis=3)
f = np.tile(npr.randn(n_state), (T, n_batch, 1))
x_init = npr.randn(n_batch, n_state)
u_lower = -npr.random((T, n_batch, n_ctrl))
u_upper = npr.random((T, n_batch, n_ctrl))
tau_cp, objs_cp = lqr_cp(
C[:,0], c[:,0], F[:,0], f[:,0], x_init[0], T, n_state, n_ctrl,
u_lower[:,0], u_upper[:,0],
)
tau_cp = tau_cp.T
x_cp = tau_cp[:,:n_state]
u_cp = tau_cp[:,n_state:]
C, c, R, S, F, f, x_init, u_lower, u_upper = [
Variable(torch.Tensor(x).double()) if x is not None else None
for x in [C, c, R, S, F, f, x_init, u_lower, u_upper]
]
dynamics = AffineDynamics(R[0,0], S[0,0], f[0,0])
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, u_lower, u_upper,
lqr_iter=20, verbose=1,
backprop=False,
exit_unconverged=False,
)(x_init, QuadCost(C, c), dynamics)
tau_lqr = util.get_data_maybe(torch.cat((x_lqr, u_lqr), 2))
npt.assert_allclose(tau_cp, tau_lqr[:,0].numpy(), rtol=1e-3)
def test_lqr_linear_bounded_delta():
npr.seed(1)
n_batch = 2
n_state, n_ctrl, T = 3, 4, 5
n_sc = n_state + n_ctrl
C = npr.randn(T, n_batch, n_sc, n_sc)
C = np.matmul(C.transpose(0, 1, 3, 2), C)
c = npr.randn(T, n_batch, n_sc)
alpha = 0.2
R = np.tile(np.eye(n_state)+alpha*np.random.randn(n_state, n_state),
(T, n_batch, 1, 1))
S = 0.01*np.tile(np.random.randn(n_state, n_ctrl), (T, n_batch, 1, 1))
F = np.concatenate((R, S), axis=3)
f = np.tile(npr.randn(n_state), (T, n_batch, 1))
x_init = npr.randn(n_batch, n_state)
u_lower = -npr.random((T, n_batch, n_ctrl))
u_upper = npr.random((T, n_batch, n_ctrl))
tau_cp, objs_cp = lqr_cp(
C[:,0], c[:,0], F[:,0], f[:,0], x_init[0], T, n_state, n_ctrl,
u_lower[:,0], u_upper[:,0],
)
tau_cp = tau_cp.T
x_cp = tau_cp[:,:n_state]
u_cp = tau_cp[:,n_state:]
C, c, R, S, F, f, x_init, u_lower, u_upper = [
Variable(torch.Tensor(x).double()) if x is not None else None
for x in [C, c, R, S, F, f, x_init, u_lower, u_upper]
]
dynamics = AffineDynamics(R[0,0], S[0,0], f[0,0])
delta_u = 0.1
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, u_lower, u_upper,
lqr_iter=1, verbose=1,
delta_u=delta_u,
backprop=False,
exit_unconverged=False,
)(x_init, QuadCost(C, c), dynamics)
u_lqr = util.get_data_maybe(u_lqr)
assert torch.abs(u_lqr).max() <= delta_u
@dec.skipif(not torch.cuda.is_available())
def test_lqr_cuda_singleton():
npr.seed(1)
n_batch = 5
n_state, n_ctrl = 3, 1
n_sc = n_state + n_ctrl
T = 5
C = npr.randn(T, n_batch, n_sc, n_sc)
C = np.matmul(C.transpose(0, 1, 3, 2), C)
c = npr.randn(T, n_batch, n_sc)
alpha = 0.2
R = np.tile(np.eye(n_state)+alpha*np.random.randn(n_state, n_state),
(T, n_batch, 1, 1))
S = np.tile(np.random.randn(n_state, n_ctrl), (T, n_batch, 1, 1))
F = np.concatenate((R, S), axis=3)
f = np.tile(npr.randn(n_state), (T, n_batch, 1))
x_init = npr.randn(n_batch, n_state)
# u_lower = -100.*npr.random((T, n_batch, n_ctrl))
# u_upper = 100.*npr.random((T, n_batch, n_ctrl))
u_lower = -1e4*np.ones((T, n_batch, n_ctrl))
u_upper = 1e4*np.ones((T, n_batch, n_ctrl))
tau_cp, objs_cp = lqr_cp(
C[:,0], c[:,0], F[:,0], f[:,0], x_init[0], T, n_state, n_ctrl,
None, None
)
tau_cp = tau_cp.T
x_cp = tau_cp[:,:n_state]
u_cp = tau_cp[:,n_state:]
C, c, R, S, F, f, x_init, u_lower, u_upper = [
Variable(torch.Tensor(x).double().cuda()) if x is not None else None
for x in [C, c, R, S, F, f, x_init, u_lower, u_upper]
]
dynamics = AffineDynamics(R[0,0], S[0,0], f[0,0])
u_lqr = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, u_lower, u_upper, u_lqr,
lqr_iter=10,
backprop=False,
)(x_init, QuadCost(C, c), dynamics)
tau_lqr = torch.cat((x_lqr, u_lqr), 2)
tau_lqr = util.get_data_maybe(torch.cat((x_lqr, u_lqr), 2))
npt.assert_allclose(tau_cp, tau_lqr[:,0].cpu().numpy(), rtol=1e-3)
u_lqr = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, None, None, u_lqr,
lqr_iter=10,
backprop=False,
)(x_init, QuadCost(C, c), dynamics)
tau_lqr = torch.cat((x_lqr, u_lqr), 2)
tau_lqr = util.get_data_maybe(torch.cat((x_lqr, u_lqr), 2))
npt.assert_allclose(tau_cp, tau_lqr[:,0].cpu().numpy(), rtol=1e-3)
# TODO: Lots of duplicated code here. Should clean up.
def test_lqr_backward_cost_linear_dynamics_unconstrained():
npr.seed(0)
torch.manual_seed(0)
n_batch, n_state, n_ctrl, T = 1, 2, 2, 3
hidden_sizes = [10, 10]
n_sc = n_state + n_ctrl
C = 10.*npr.randn(T, n_batch, n_sc, n_sc).astype(np.float64)
C = np.matmul(C.transpose(0, 1, 3, 2), C)
c = 10.*npr.randn(T, n_batch, n_sc).astype(np.float64)
x_init = npr.randn(n_batch, n_state).astype(np.float64)
beta = 100.
u_lower = -beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
u_upper = beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
F = npr.randn(T-1, n_batch, n_state, n_sc)
f = npr.randn(T-1, n_batch, n_state)
def forward_numpy(C, c, x_init, u_lower, u_upper, F, f):
_C, _c, _x_init, _u_lower, _u_upper, F, f = [
Variable(torch.Tensor(x).double()) if x is not None else None
for x in [C, c, x_init, u_lower, u_upper, F, f]
]
u_init = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
lqr_iter=40,
verbose=1,
exit_unconverged=False,
backprop=False,
max_linesearch_iter=2,
)(_x_init, QuadCost(_C, _c), LinDx(F, f))
return util.get_data_maybe(u_lqr.view(-1)).numpy()
def f_c(c_flat):
c_ = c_flat.reshape(T, n_batch, n_sc)
return forward_numpy(C, c_, x_init, u_lower, u_upper, F, f)
def f_F(F_flat):
F_ = F_flat.reshape(T-1, n_batch, n_state, n_sc)
return forward_numpy(C, c, x_init, u_lower, u_upper, F_ ,f)
def f_f(f_flat):
f_ = f_flat.reshape(T-1, n_batch, n_state)
return forward_numpy(C, c, x_init, u_lower, u_upper, F, f_)
u = forward_numpy(C, c, x_init, u_lower, u_upper, F, f)
# Make sure the solution is not on the boundary.
assert np.all(u != u_lower.reshape(-1)) and np.all(u != u_upper.reshape(-1))
du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
du_dF_fd = nd.Jacobian(f_F)(F.reshape(-1))
du_df_fd = nd.Jacobian(f_f)(f.reshape(-1))
_C, _c, _x_init, _u_lower, _u_upper, F, f = [
Variable(torch.Tensor(x).double(), requires_grad=True)
if x is not None else None
for x in [C, c, x_init, u_lower, u_upper, F, f]
]
u_init = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
lqr_iter=20,
verbose=1,
exit_unconverged=False,
)(_x_init, QuadCost(_C, _c), LinDx(F, f))
u_lqr = u_lqr.view(-1)
du_dC = []
du_dc = []
du_dF = []
du_df = []
for i in range(len(u_lqr)):
dCi = grad(u_lqr[i], [_C], retain_graph=True)[0].view(-1)
dci = grad(u_lqr[i], [_c], retain_graph=True)[0].view(-1)
dF = grad(u_lqr[i], [F], retain_graph=True)[0].view(-1)
df = grad(u_lqr[i], [f], retain_graph=True)[0].view(-1)
du_dC.append(dCi)
du_dc.append(dci)
du_dF.append(dF)
du_df.append(df)
du_dC = torch.stack(du_dC).data.numpy()
du_dc = torch.stack(du_dc).data.numpy()
du_dF = torch.stack(du_dF).data.numpy()
du_df = torch.stack(du_df).data.numpy()
npt.assert_allclose(du_dc_fd, du_dc, atol=1e-4)
npt.assert_allclose(du_dF, du_dF_fd, atol=1e-4)
npt.assert_allclose(du_df, du_df_fd, atol=1e-4)
def test_lqr_backward_cost_linear_dynamics_constrained():
npr.seed(0)
torch.manual_seed(0)
n_batch, n_state, n_ctrl, T = 1, 2, 2, 3
hidden_sizes = [10, 10]
n_sc = n_state + n_ctrl
C = 10.*npr.randn(T, n_batch, n_sc, n_sc).astype(np.float64)
C = np.matmul(C.transpose(0, 1, 3, 2), C)
c = 10.*npr.randn(T, n_batch, n_sc).astype(np.float64)
x_init = npr.randn(n_batch, n_state).astype(np.float64)
beta = 0.5
u_lower = -beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
u_upper = beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
F = npr.randn(T-1, n_batch, n_state, n_sc)
f = npr.randn(T-1, n_batch, n_state)
def forward_numpy(C, c, x_init, u_lower, u_upper, F, f):
_C, _c, _x_init, _u_lower, _u_upper, F, f = [
Variable(torch.Tensor(x).double()) if x is not None else None
for x in [C, c, x_init, u_lower, u_upper, F, f]
]
u_init = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
lqr_iter=40,
verbose=1,
exit_unconverged=True,
backprop=False,
max_linesearch_iter=2,
)(_x_init, QuadCost(_C, _c), LinDx(F, f))
return util.get_data_maybe(u_lqr.view(-1)).numpy()
def f_c(c_flat):
c_ = c_flat.reshape(T, n_batch, n_sc)
return forward_numpy(C, c_, x_init, u_lower, u_upper, F, f)
def f_F(F_flat):
F_ = F_flat.reshape(T-1, n_batch, n_state, n_sc)
return forward_numpy(C, c, x_init, u_lower, u_upper, F_, f)
def f_f(f_flat):
f_ = f_flat.reshape(T-1, n_batch, n_state)
return forward_numpy(C, c, x_init, u_lower, u_upper, F, f_)
def f_x_init(x_init):
x_init = x_init.reshape(1, -1)
return forward_numpy(C, c, x_init, u_lower, u_upper, F, f)
u = forward_numpy(C, c, x_init, u_lower, u_upper, F, f)
# Make sure the solution is strictly partially on the boundary.
assert np.any(u == u_lower.reshape(-1)) or np.any(u == u_upper.reshape(-1))
assert np.any((u != u_lower.reshape(-1)) & (u != u_upper.reshape(-1)))
du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
du_dF_fd = nd.Jacobian(f_F)(F.reshape(-1))
du_df_fd = nd.Jacobian(f_f)(f.reshape(-1))
du_dxinit_fd = nd.Jacobian(f_x_init)(x_init[0])
_C, _c, _x_init, _u_lower, _u_upper, F, f = [
Variable(torch.Tensor(x).double(), requires_grad=True)
if x is not None else None
for x in [C, c, x_init, u_lower, u_upper, F, f]
]
u_init = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
lqr_iter=20,
verbose=1,
)(_x_init, QuadCost(_C, _c), LinDx(F, f))
u_lqr_flat = u_lqr.view(-1)
du_dC = []
du_dc = []
du_dF = []
du_df = []
du_dx_init = []
for i in range(len(u_lqr_flat)):
dCi = grad(u_lqr_flat[i], [_C], retain_graph=True)[0].view(-1)
dci = grad(u_lqr_flat[i], [_c], retain_graph=True)[0].view(-1)
dF = grad(u_lqr_flat[i], [F], retain_graph=True)[0].view(-1)
df = grad(u_lqr_flat[i], [f], retain_graph=True)[0].view(-1)
dx_init = grad(u_lqr_flat[i], [_x_init], retain_graph=True)[0].view(-1)
du_dC.append(dCi)
du_dc.append(dci)
du_dF.append(dF)
du_df.append(df)
du_dx_init.append(dx_init)
du_dC = torch.stack(du_dC).data.numpy()
du_dc = torch.stack(du_dc).data.numpy()
du_dF = torch.stack(du_dF).data.numpy()
du_df = torch.stack(du_df).data.numpy()
du_dx_init = torch.stack(du_dx_init).data.numpy()
npt.assert_allclose(du_dc_fd, du_dc, atol=1e-4)
npt.assert_allclose(du_dF, du_dF_fd, atol=1e-4)
npt.assert_allclose(du_df, du_df_fd, atol=1e-4)
npt.assert_allclose(du_dx_init, du_dxinit_fd, atol=1e-4)
def test_lqr_backward_cost_affine_dynamics_module_constrained():
npr.seed(0)
torch.manual_seed(0)
n_batch, n_state, n_ctrl, T = 1, 2, 2, 2
hidden_sizes = [10]
n_sc = n_state + n_ctrl
C = 10.*npr.randn(T, n_batch, n_sc, n_sc).astype(np.float64)
C = np.matmul(C.transpose(0, 1, 3, 2), C)
c = 10.*npr.randn(T, n_batch, n_sc).astype(np.float64)
x_init = npr.randn(n_batch, n_state).astype(np.float64)
# beta = 0.5
beta = 2.0
u_lower = -beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
u_upper = beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
_C, _c, _x_init, _u_lower, _u_upper = [
Variable(torch.Tensor(x).double(), requires_grad=True)
if x is not None else None
for x in [C, c, x_init, u_lower, u_upper]
]
F = Variable(
torch.randn(1, 1, n_state, n_sc).repeat(T-1, 1, 1, 1).double(),
requires_grad=True)
dynamics = AffineDynamics(F[0,0,:,:n_state], F[0,0,:,n_state:])
u_init = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
lqr_iter=20,
verbose=1,
)(_x_init, QuadCost(_C, _c), LinDx(F))
u_lqr_flat = u_lqr.view(-1)
du_dF = []
for i in range(len(u_lqr_flat)):
dF = grad(u_lqr_flat[i], [F], retain_graph=True)[0].view(-1)
du_dF.append(dF)
du_dF = torch.stack(du_dF).data.numpy()
u_init = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
lqr_iter=20,
verbose=1,
)(_x_init, QuadCost(_C, _c), dynamics)
u_lqr_flat = u_lqr.view(-1)
du_dF_ = []
for i in range(len(u_lqr_flat)):
dF = grad(u_lqr_flat[i], [F], retain_graph=True)[0].view(-1)
du_dF_.append(dF)
du_dF_ = torch.stack(du_dF_).data.numpy()
npt.assert_allclose(du_dF, du_dF_, atol=1e-4)
def test_lqr_backward_cost_nn_dynamics_module_constrained():
npr.seed(0)
torch.manual_seed(0)
n_batch, n_state, n_ctrl, T = 1, 2, 2, 2
hidden_sizes = [10, 10]
n_sc = n_state + n_ctrl
C = 10.*npr.randn(T, n_batch, n_sc, n_sc).astype(np.float64)
C = np.matmul(C.transpose(0, 1, 3, 2), C)
c = 10.*npr.randn(T, n_batch, n_sc).astype(np.float64)
x_init = npr.randn(n_batch, n_state).astype(np.float64)
beta = 1.
u_lower = -beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
u_upper = beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
dynamics = NNDynamics(
n_state, n_ctrl, hidden_sizes, activation='sigmoid').double()
fc0b = dynamics.fcs[0].bias.view(-1).data.numpy().copy()
def forward_numpy(C, c, x_init, u_lower, u_upper, fc0b):
_C, _c, _x_init, _u_lower, _u_upper, fc0b = [
Variable(torch.Tensor(x).double()) if x is not None else None
for x in [C, c, x_init, u_lower, u_upper, fc0b]
]
dynamics.fcs[0].bias.data[:] = fc0b.data
# dynamics.A.data[:] = fc0b.view(n_state, n_state).data
u_init = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
lqr_iter=40,
verbose=-1,
exit_unconverged=True,
backprop=False,
max_linesearch_iter=1,
)(_x_init, QuadCost(_C, _c), dynamics)
return util.get_data_maybe(u_lqr.view(-1)).numpy()
def f_c(c_flat):
c_ = c_flat.reshape(T, n_batch, n_sc)
return forward_numpy(C, c_, x_init, u_lower, u_upper, fc0b)
def f_fc0b(fc0b):
return forward_numpy(C, c, x_init, u_lower, u_upper, fc0b)
u = forward_numpy(C, c, x_init, u_lower, u_upper, fc0b)
# Make sure the solution is strictly partially on the boundary.
assert np.any(u == u_lower.reshape(-1)) or np.any(u == u_upper.reshape(-1))
assert np.any((u != u_lower.reshape(-1)) & (u != u_upper.reshape(-1)))
du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
du_dfc0b_fd = nd.Jacobian(f_fc0b)(fc0b.reshape(-1))
dynamics.fcs[0].bias.data = torch.DoubleTensor(fc0b).clone()
_C, _c, _x_init, _u_lower, _u_upper, fc0b = [
Variable(torch.Tensor(x).double(), requires_grad=True)
if x is not None else None
for x in [C, c, x_init, u_lower, u_upper, fc0b]
]
u_init = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
lqr_iter=20,
verbose=-1,
max_linesearch_iter=1,
grad_method=GradMethods.ANALYTIC,
)(_x_init, QuadCost(_C, _c), dynamics)
u_lqr_flat = u_lqr.view(-1)
du_dC = []
du_dc = []
du_dfc0b = []
for i in range(len(u_lqr_flat)):
dCi = grad(u_lqr_flat[i], [_C], retain_graph=True)[0].view(-1)
dci = grad(u_lqr_flat[i], [_c], retain_graph=True)[0].view(-1)
dfc0b = grad(u_lqr_flat[i], [dynamics.fcs[0].bias],
retain_graph=True)[0].view(-1)
du_dC.append(dCi)
du_dc.append(dci)
du_dfc0b.append(dfc0b)
du_dC = torch.stack(du_dC).data.numpy()
du_dc = torch.stack(du_dc).data.numpy()
du_dfc0b = torch.stack(du_dfc0b).data.numpy()
npt.assert_allclose(du_dc_fd, du_dc, atol=1e-3)
npt.assert_allclose(du_dfc0b_fd, du_dfc0b, atol=1e-3)
def test_lqr_backward_cost_nn_dynamics_module_constrained_slew():
npr.seed(0)
torch.manual_seed(0)
n_batch, n_state, n_ctrl, T = 1, 2, 2, 2
hidden_sizes = [10, 10]
n_sc = n_state + n_ctrl
C = 10.*npr.randn(T, n_batch, n_sc, n_sc).astype(np.float64)
C = np.matmul(C.transpose(0, 1, 3, 2), C)
c = 10.*npr.randn(T, n_batch, n_sc).astype(np.float64)
x_init = npr.randn(n_batch, n_state).astype(np.float64)
beta = 1.
u_lower = -beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
u_upper = beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
dynamics = NNDynamics(
n_state, n_ctrl, hidden_sizes, activation='sigmoid').double()
fc0b = dynamics.fcs[0].bias.view(-1).data.numpy().copy()
def forward_numpy(C, c, x_init, u_lower, u_upper, fc0b):
_C, _c, _x_init, _u_lower, _u_upper, fc0b = [
Variable(torch.Tensor(x).double(), requires_grad=True)
if x is not None else None
for x in [C, c, x_init, u_lower, u_upper, fc0b]
]
dynamics.fcs[0].bias.data[:] = fc0b.data
# dynamics.A.data[:] = fc0b.view(n_state, n_state).data
u_init = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
lqr_iter=40,
verbose=-1,
exit_unconverged=True,
backprop=False,
max_linesearch_iter=1,
slew_rate_penalty=1.0,
)(_x_init, QuadCost(_C, _c), dynamics)
return util.get_data_maybe(u_lqr.view(-1)).numpy()
def f_c(c_flat):
c_ = c_flat.reshape(T, n_batch, n_sc)
return forward_numpy(C, c_, x_init, u_lower, u_upper, fc0b)
def f_fc0b(fc0b):
return forward_numpy(C, c, x_init, u_lower, u_upper, fc0b)
u = forward_numpy(C, c, x_init, u_lower, u_upper, fc0b)
# Make sure the solution is strictly partially on the boundary.
assert np.any(u == u_lower.reshape(-1)) or np.any(u == u_upper.reshape(-1))
assert np.any((u != u_lower.reshape(-1)) & (u != u_upper.reshape(-1)))
du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
du_dfc0b_fd = nd.Jacobian(f_fc0b)(fc0b.reshape(-1))
dynamics.fcs[0].bias.data = torch.DoubleTensor(fc0b).clone()
_C, _c, _x_init, _u_lower, _u_upper, fc0b = [
Variable(torch.Tensor(x).double(), requires_grad=True)
if x is not None else None
for x in [C, c, x_init, u_lower, u_upper, fc0b]
]
u_init = None
x_lqr, u_lqr, objs_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
lqr_iter=20,
verbose=-1,
max_linesearch_iter=1,
grad_method=GradMethods.ANALYTIC,
slew_rate_penalty=1.0,
)(_x_init, QuadCost(_C, _c), dynamics)
u_lqr_flat = u_lqr.view(-1)
du_dC = []
du_dc = []
du_dfc0b = []
for i in range(len(u_lqr_flat)):
dCi = grad(u_lqr_flat[i], [_C], retain_graph=True)[0].contiguous().view(-1)
dci = grad(u_lqr_flat[i], [_c], retain_graph=True)[0].contiguous().view(-1)
dfc0b = grad(u_lqr_flat[i], [dynamics.fcs[0].bias],
retain_graph=True)[0].view(-1)
du_dC.append(dCi)
du_dc.append(dci)
du_dfc0b.append(dfc0b)
du_dC = torch.stack(du_dC).data.numpy()
du_dc = torch.stack(du_dc).data.numpy()
du_dfc0b = torch.stack(du_dfc0b).data.numpy()
npt.assert_allclose(du_dc_fd, du_dc, atol=1e-3)
npt.assert_allclose(du_dfc0b_fd, du_dfc0b, atol=1e-3)
def test_lqr_linearization():
npr.seed(0)
torch.manual_seed(0)
n_batch, n_state, n_ctrl, T = 2, 3, 4, 5
hidden_sizes = [10]
n_sc = n_state + n_ctrl
C = 10.*npr.randn(T, n_batch, n_sc, n_sc).astype(np.float64)
C = np.matmul(C.transpose(0, 1, 3, 2), C)
c = 10.*npr.randn(T, n_batch, n_sc).astype(np.float64)
x_init = npr.randn(n_batch, n_state).astype(np.float64)
# beta = 0.5
beta = 2.0
u_lower = -beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
u_upper = beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
_C, _c, _x_init, _u_lower, _u_upper = [
Variable(torch.Tensor(x).double(), requires_grad=True)
if x is not None else None
for x in [C, c, x_init, u_lower, u_upper]
]
F = Variable(
torch.randn(1, 1, n_state, n_sc).repeat(T-1, 1, 1, 1).double(),
requires_grad=True)
dynamics = NNDynamics(
n_state, n_ctrl, hidden_sizes, activation='sigmoid').double()
u_init = None
_lqr = mpc.MPC(
n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
grad_method=GradMethods.ANALYTIC,
)
u = torch.randn(T, n_batch, n_ctrl).type_as(_x_init.data)
x = util.get_traj(T, u, x_init=_x_init, dynamics=dynamics)
Fan, fan = _lqr.linearize_dynamics(x, u, dynamics, diff=False)
_lqr.grad_method=GradMethods.AUTO_DIFF
Fau, fau = _lqr.linearize_dynamics(x, u, dynamics, diff=False)
npt.assert_allclose(Fan.data.numpy(), Fau.data.numpy(), atol=1e-4)
npt.assert_allclose(fan.data.numpy(), fau.data.numpy(), atol=1e-4)
# Make sure diff version doesn't crash:
Fau, fau = _lqr.linearize_dynamics(x, u, dynamics, diff=True)
_lqr.grad_method=GradMethods.FINITE_DIFF
Ff, ff = _lqr.linearize_dynamics(x, u, dynamics, diff=False)
npt.assert_allclose(Fan.data.numpy(), Ff.data.numpy(), atol=1e-4)
npt.assert_allclose(fan.data.numpy(), ff.data.numpy(), atol=1e-4)
# Make sure diff version doesn't crash:
Ff, ff = _lqr.linearize_dynamics(x, u, dynamics, diff=True)
def test_lqr_slew_rate():
n_batch = 2
n_state, n_ctrl = 3, 4
n_sc = n_state + n_ctrl
T = 5
alpha = 0.2
torch.manual_seed(1)
C = torch.randn(T, n_batch, n_sc, n_sc)
C = C.transpose(2,3).matmul(C)
c = torch.randn(T, n_batch, n_sc)
x_init = torch.randn(n_batch, n_state)
R = torch.eye(n_state) + alpha*torch.randn(n_state, n_state)
S = torch.randn(n_state, n_ctrl)
f = torch.randn(n_state)
C, c, x_init, R, S, f = map(Variable, (C, c, x_init, R, S, f))
dynamics = AffineDynamics(R, S, f)
x, u, objs = mpc.MPC(
n_state, n_ctrl, T,
u_lower=None, u_upper=None, u_init=None,
lqr_iter=10,
backprop=False,
verbose=1,
exit_unconverged=False,
eps=1e-4,
)(x_init, QuadCost(C, c), dynamics)
# The solution should be the same when the slew rate approaches 0.
x_slew_eps, u_slew_eps, objs_slew_eps = mpc.MPC(
n_state, n_ctrl, T,
u_lower=None, u_upper=None, u_init=None,
lqr_iter=10,
backprop=False,
verbose=1,
exit_unconverged=False,
eps=1e-4,
slew_rate_penalty=1e-6,
)(x_init, QuadCost(C, c), dynamics)
npt.assert_allclose(x.data.numpy(), x_slew_eps.data.numpy(), atol=1e-3)
npt.assert_allclose(u.data.numpy(), u_slew_eps.data.numpy(), atol=1e-3)
x_slew, u_slew, objs_slew= mpc.MPC(
n_state, n_ctrl, T,
u_lower=None, u_upper=None, u_init=None,
lqr_iter=10,
backprop=False,
verbose=1,
exit_unconverged=False,
eps=1e-4,
slew_rate_penalty=1.,
)(x_init, QuadCost(C, c), dynamics)
assert np.alltrue((objs < objs_slew).numpy())
d = torch.norm(u[:-1] - u[1:]).item()
d_slew = torch.norm(u_slew[:-1] - u_slew[1:]).item()
assert d_slew < d
def test_memory():
import psutil
torch.manual_seed(0)
n_batch, n_state, n_ctrl, T = 2, 3, 4, 5
n_sc = n_state + n_ctrl
# Randomly initialize a PSD quadratic cost and linear dynamics.
C = torch.randn(T*n_batch, n_sc, n_sc)
C = torch.bmm(C, C.transpose(1, 2)).view(T, n_batch, n_sc, n_sc)
c = torch.randn(T, n_batch, n_sc)
alpha = 0.2
R = (torch.eye(n_state)+alpha*torch.randn(n_state, n_state)).repeat(T, n_batch, 1, 1)
S = torch.randn(T, n_batch, n_state, n_ctrl)
F = torch.cat((R, S), dim=3)
# The initial state.
x_init = torch.randn(n_batch, n_state)
# The upper and lower control bounds.
u_lower = -torch.rand(T, n_batch, n_ctrl)
u_upper = torch.rand(T, n_batch, n_ctrl)
process = psutil.Process(os.getpid())
# gc.collect()
# start_mem = process.memory_info().rss
# _lqr = LQRStep(
# n_state=n_state,
# n_ctrl=n_ctrl,
# T=T,
# u_lower=u_lower,
# u_upper=u_upper,
# u_zero_I=u_zero_I,
# true_cost=cost,
# true_dynamics=dynamics,
# delta_u=delta_u,
# delta_space=True,
# # current_x=x,
# # current_u=u,
# )
# e = Variable(torch.Tensor())
# x, u = _lqr(x_init, C, c, F, f if f is not None else e)
# gc.collect()
# mem_used = process.memory_info().rss - start_mem
# print(mem_used)
# assert mem_used == 0
gc.collect()
start_mem = process.memory_info().rss
_mpc = mpc.MPC(
n_state=n_state,
n_ctrl=n_ctrl,
T=T,
u_lower=u_lower,
u_upper=u_upper,
lqr_iter=20,
verbose=1,
backprop=False,
exit_unconverged=False,
)
_mpc(x_init, QuadCost(C, c), LinDx(F))
del _mpc
gc.collect()
mem_used = process.memory_info().rss - start_mem
print(mem_used)
assert mem_used == 0
if __name__=='__main__':
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux', call_pdb=1)
test_lqr_qp()
test_lqr_linear_unbounded()
test_lqr_linear_bounded()
test_lqr_linear_bounded_delta()
# test_lqr_cuda_singleton()
test_lqr_backward_cost_linear_dynamics_unconstrained()
test_lqr_backward_cost_linear_dynamics_constrained()
test_lqr_backward_cost_affine_dynamics_module_constrained()
test_lqr_backward_cost_nn_dynamics_module_constrained()
test_lqr_backward_cost_nn_dynamics_module_constrained_slew()
test_lqr_linearization()
test_lqr_slew_rate()
# test_memory()
| 33.463617
| 89
| 0.607387
| 5,701
| 32,192
| 3.151728
| 0.051044
| 0.042743
| 0.034283
| 0.043411
| 0.860307
| 0.839771
| 0.826748
| 0.807157
| 0.79124
| 0.775045
| 0
| 0.028188
| 0.240712
| 32,192
| 961
| 90
| 33.498439
| 0.70691
| 0.053336
| 0
| 0.732167
| 0
| 0
| 0.001879
| 0
| 0
| 0
| 0
| 0.001041
| 0.05249
| 1
| 0.040377
| false
| 0
| 0.025572
| 0.002692
| 0.088829
| 0.001346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d7dfa0bb737ce6e15281438d987e60cbb5861093
| 16,647
|
py
|
Python
|
reliefparser/models/decoder.py
|
XuezheMax/ReLiefParser
|
4ffb2495002809de70809689b84d80d2a59cd2ac
|
[
"MIT"
] | 6
|
2016-11-02T20:28:01.000Z
|
2018-06-25T03:37:25.000Z
|
reliefparser/models/decoder.py
|
XuezheMax/ReLiefParser
|
4ffb2495002809de70809689b84d80d2a59cd2ac
|
[
"MIT"
] | null | null | null |
reliefparser/models/decoder.py
|
XuezheMax/ReLiefParser
|
4ffb2495002809de70809689b84d80d2a59cd2ac
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
import bisect
from time import time
class TreeDecoder(object):
def __init__(self, isize, hsize, msize, asize, max_len, rnn_class, **kwargs):
super(TreeDecoder, self).__init__()
self.name = kwargs.get('name', self.__class__.__name__)
self.scope = kwargs.get('scope', self.name)
self.epsilon = tf.Variable(kwargs.get('epsilon', 1.0), trainable=False)
self.isize = isize
self.hsize = hsize
self.msize = msize
self.asize = asize
self.max_len = max_len
self.num_layer = kwargs.get('num_layer', 1)
self.rnn_cell = tf.nn.rnn_cell.MultiRNNCell([rnn_class(num_units=self.hsize)] * self.num_layer)
self.weight_intializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
# memory : a [batch_size X seq_len X msize] tensor (float32)
# subtree_masks : a list of [batch_size X seq_len] tensors (float32)
# valid_indices : a list of [batch_size X seq_len] tensors (int32)
# left_indices : a list of [batch_size X seq_len] tensors (int32)
# right_indices : a list of [batch_size X seq_len] tensors (int32)
# valid_masks : a list of [batch_size X 2*seq_len] tensors (float32)
def __call__(self, memory, subtree_masks, valid_indices, left_indices, right_indices, valid_masks, init_state=None):
# initial states and variables across steps
batch_size = tf.shape(memory)[0]
if init_state is None:
init_state = self.rnn_cell.zero_state(batch_size, dtype=tf.float32)
# padding the memory with a dummy (all-zero) vector at the end of the 2nd dimension
pad_memory = tf.pad(memory, [[0,0],[0,1],[0,0]])
base_idx = tf.expand_dims(tf.range(batch_size) * tf.shape(pad_memory)[1], [1])
def step(state_tm, subtree_t, vd_idx_t, lt_idx_t, rt_idx_t, mask_t):
# attention vec left
weight_hid_input = tf.get_variable(name='weight_hidden_input', shape=[self.hsize, self.asize],
initializer=self.weight_intializer)
weight_mem_input = tf.get_variable(name='weight_head_input', shape=[1, self.msize, self.asize],
initializer=self.weight_intializer)
weight_input = tf.get_variable(name='weight_input', shape=[self.asize],
initializer=self.weight_intializer)
bias_input = tf.get_variable(name='bias_input', shape=[self.asize],
initializer=tf.constant_initializer(value=0.0))
# attention over the subtree memory
hid_tm = state_tm[-1] if isinstance(state_tm[-1], tf.Tensor) else state_tm[-1].h
att_input = tf.tanh(tf.expand_dims(tf.matmul(hid_tm, weight_hid_input), 1) + \
tf.nn.conv1d(pad_memory, weight_mem_input, 1, 'SAME') + \
bias_input)
score_input = tf.reduce_sum(att_input * weight_input, [2])
prob_input = tf.nn.softmax(score_input * subtree_t)
inp_t = tf.reduce_sum(tf.expand_dims(prob_input, dim=2) * pad_memory, reduction_indices=[1])
# perform the rnn step
hid_t, state_t = self.rnn_cell(inp_t, state_tm)
# valid memory, left memory & right memory
flat_vd_idx = base_idx + vd_idx_t
flat_lt_idx = base_idx + lt_idx_t
flat_rt_idx = base_idx + rt_idx_t
vd_mem = tf.gather(tf.reshape(pad_memory, [-1, self.msize]), flat_vd_idx) # valid memory
lt_mem = tf.gather(tf.reshape(pad_memory, [-1, self.msize]), flat_lt_idx) # left memory
rt_mem = tf.gather(tf.reshape(pad_memory, [-1, self.msize]), flat_rt_idx) # right memory
# parameters for left attention
weight_hid_left = tf.get_variable(name='weight_hidden_left', shape=[self.hsize, self.asize],
initializer=self.weight_intializer)
weight_hd_left = tf.get_variable(name='weight_head_left', shape=[1, self.msize, self.asize],
initializer=self.weight_intializer)
weight_cd_left = tf.get_variable(name='weight_child_left', shape=[1, self.msize, self.asize],
initializer=self.weight_intializer)
weight_left = tf.get_variable(name='weight_left', shape=[self.asize],
initializer=self.weight_intializer)
bias_left = tf.get_variable(name='bias_left', shape=[self.asize],
initializer=tf.constant_initializer(value=0.0))
# left-arc score (head = valid memory, child = left memory)
hd_att_left = tf.nn.conv1d(vd_mem, weight_hd_left, 1, 'SAME')
cd_att_left = tf.nn.conv1d(lt_mem, weight_cd_left, 1, 'SAME')
att_left = tf.tanh(tf.expand_dims(tf.matmul(hid_t, weight_hid_left), 1) +
hd_att_left + cd_att_left + bias_left)
score_left = tf.reduce_sum(att_left * weight_left, [2])
# parameters for right attention
weight_hid_right = tf.get_variable(name='weight_hidden_right', shape=[self.hsize, self.asize],
initializer=self.weight_intializer)
weight_hd_right = tf.get_variable(name='weight_head_right', shape=[1, self.msize, self.asize],
initializer=self.weight_intializer)
weight_cd_right = tf.get_variable(name='weight_child_right', shape=[1, self.msize, self.asize],
initializer=self.weight_intializer)
weight_right = tf.get_variable(name='weight_right', shape=[self.asize],
initializer=self.weight_intializer)
bias_right = tf.get_variable(name='bias_right', shape=[self.asize],
initializer=tf.constant_initializer(value=0.0))
# right-arc score (head = valid memory, child = right memory)
hd_att_right = tf.nn.conv1d(vd_mem, weight_hd_right, 1, 'SAME')
cd_att_right = tf.nn.conv1d(rt_mem, weight_cd_right, 1, 'SAME')
att_right = tf.tanh(tf.expand_dims(tf.matmul(hid_t, weight_hid_right), 1) +
hd_att_right + cd_att_right + bias_right)
score_right = tf.reduce_sum(att_right * weight_right, [2])
# concatenate and softmax
score_t = tf.concat(1, [score_left, score_right])
if mask_t is not None:
logp_t = tf.nn.log_softmax(score_t * mask_t)
else:
logp_t = tf.nn.log_softmax(score_t)
# use epsilon greedy as the exploring policy
greedy_act_func = lambda: tf.argmax(logp_t, dimension=1)
sample_act_func = lambda: tf.reshape(tf.multinomial(logp_t, 1), [-1])
# rand_num = tf.random_uniform(shape=[1])[0]
# act_t = tf.cond(rand_num>self.epsilon, greedy_act_func, sample_act_func)
act_t = sample_act_func()
act_t = tf.to_int32(act_t)
# probabilty of sampled action
prob_shape_t = tf.shape(logp_t)
action_idx = tf.range(prob_shape_t[0]) * prob_shape_t[1] + act_t
act_logp_t = tf.gather(tf.reshape(logp_t, [-1]), action_idx)
return hid_t, state_t, act_t, act_logp_t
hiddens, states, actions, act_logps = [], [], [], []
# core computational graph
with tf.variable_scope(self.scope) as dec_scope:
for step_idx in range(self.max_len):
# recurrent parameter share
if step_idx > 0:
dec_scope.reuse_variables()
# fetch step func arguments
state_tm = states[step_idx-1] if step_idx > 0 else init_state
subtree_t = subtree_masks[step_idx]
vd_idx_t = valid_indices[step_idx]
lt_idx_t = left_indices[step_idx]
rt_idx_t = right_indices[step_idx]
mask_t = valid_masks[step_idx] if valid_masks is not None else None
# call step func
hid_t, state_t, act_t, act_prob_t = step(state_tm, subtree_t, vd_idx_t, lt_idx_t, rt_idx_t, mask_t)
# store step func returns
hiddens.append(hid_t)
states.append(state_t)
actions.append(act_t)
act_logps.append(act_prob_t)
return hiddens, actions, act_logps
class Decoder(object):
def __init__(self, isize, hsize, msize, asize, max_len, rnn_class, **kwargs):
super(Decoder, self).__init__()
self.name = kwargs.get('name', self.__class__.__name__)
self.scope = kwargs.get('scope', self.name)
self.epsilon = tf.Variable(kwargs.get('epsilon', 1.0), trainable=False)
self.isize = isize
self.hsize = hsize
self.msize = msize
self.asize = asize
self.max_len = max_len
self.num_layer = kwargs.get('num_layer', 1)
self.rnn_cell = tf.nn.rnn_cell.MultiRNNCell([rnn_class(num_units=self.hsize)] * self.num_layer)
self.weight_intializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
def __call__(self, memory, input_indices, valid_indices, left_indices, right_indices, valid_masks, init_state=None):
# initial states and variables across steps
batch_size = tf.shape(memory)[0]
if init_state is None:
init_state = self.rnn_cell.zero_state(batch_size, dtype=tf.float32)
# padding the memory with a dummy (all-zero) vector at the end of the 2nd dimension
pad_memory = tf.pad(memory, [[0,0],[0,1],[0,0]])
base_idx = tf.expand_dims(tf.range(batch_size) * tf.shape(pad_memory)[1], [1])
def step(state_tm, in_idx_t, vd_idx_t, lt_idx_t, rt_idx_t, mask_t):
# combine previsouly predicted head and child as the current input
flat_in_idx = base_idx + in_idx_t
inp_vecs = tf.gather(tf.reshape(pad_memory, [-1, self.msize]), flat_in_idx)
inp_t = tf.reshape(inp_vecs, [batch_size, 2*self.msize])
# weight_combine = tf.get_variable(name='weight_combine', shape=[2*self.msize, self.isize],
# initializer=self.weight_intializer)
# bias_combine = tf.get_variable(name='bias_combine', shape=[self.isize],
# initializer=tf.constant_initializer(value=0.0))
# # TODO: discuss with Max about the model design here
# inp_vecs = tf.reshape(inp_vecs, [batch_size, 2*self.msize])
# inp_t = tf.tanh(tf.matmul(inp_vecs, weight_combine) + bias_combine)
# perform rnn step
hid_t, state_t = self.rnn_cell(inp_t, state_tm)
# valid memory, left memory & right memory
flat_vd_idx = base_idx + vd_idx_t
flat_lt_idx = base_idx + lt_idx_t
flat_rt_idx = base_idx + rt_idx_t
vd_mem = tf.gather(tf.reshape(pad_memory, [-1, self.msize]), flat_vd_idx) # valid memory
lt_mem = tf.gather(tf.reshape(pad_memory, [-1, self.msize]), flat_lt_idx) # left memory
rt_mem = tf.gather(tf.reshape(pad_memory, [-1, self.msize]), flat_rt_idx) # right memory
# attention vec left
weight_hid_left = tf.get_variable(name='weight_hidden_left', shape=[self.hsize, self.asize],
initializer=self.weight_intializer)
weight_hd_left = tf.get_variable(name='weight_head_left', shape=[1, self.msize, self.asize],
initializer=self.weight_intializer)
weight_cd_left = tf.get_variable(name='weight_child_left', shape=[1, self.msize, self.asize],
initializer=self.weight_intializer)
weight_left = tf.get_variable(name='weight_left', shape=[self.asize],
initializer=self.weight_intializer)
bias_left = tf.get_variable(name='bias_left', shape=[self.asize],
initializer=tf.constant_initializer(value=0.0))
weight_hid_right = tf.get_variable(name='weight_hidden_right', shape=[self.hsize, self.asize],
initializer=self.weight_intializer)
weight_hd_right = tf.get_variable(name='weight_head_right', shape=[1, self.msize, self.asize],
initializer=self.weight_intializer)
weight_cd_right = tf.get_variable(name='weight_child_right', shape=[1, self.msize, self.asize],
initializer=self.weight_intializer)
weight_right = tf.get_variable(name='weight_right', shape=[self.asize],
initializer=self.weight_intializer)
bias_right = tf.get_variable(name='bias_right', shape=[self.asize],
initializer=tf.constant_initializer(value=0.0))
# left-arc score (head = valid memory, child = left memory)
hd_att_left = tf.nn.conv1d(vd_mem, weight_hd_left, 1, 'SAME')
cd_att_left = tf.nn.conv1d(lt_mem, weight_cd_left, 1, 'SAME')
att_left = tf.tanh(tf.expand_dims(tf.matmul(hid_t, weight_hid_left), 1) +
hd_att_left + cd_att_left + bias_left)
score_left = tf.reduce_sum(att_left * weight_left, [2])
# right-arc score (head = valid memory, child = right memory)
hd_att_right = tf.nn.conv1d(vd_mem, weight_hd_right, 1, 'SAME')
cd_att_right = tf.nn.conv1d(rt_mem, weight_cd_right, 1, 'SAME')
att_right = tf.tanh(tf.expand_dims(tf.matmul(hid_t, weight_hid_right), 1) +
hd_att_right + cd_att_right + bias_right)
score_right = tf.reduce_sum(att_right * weight_right, [2])
# concatenate and softmax
score_t = tf.concat(1, [score_left, score_right])
if mask_t is not None:
score_t = score_t * mask_t
logp_t = tf.nn.log_softmax(score_t)
# use epsilon greedy as the exploring policy
greedy_act_func = lambda: tf.argmax(logp_t, dimension=1)
sample_act_func = lambda: tf.reshape(tf.multinomial(logp_t, 1), [-1])
# rand_num = tf.random_uniform(shape=[1])[0]
# act_t = tf.cond(rand_num>self.epsilon, greedy_act_func, sample_act_func)
act_t = sample_act_func()
act_t = tf.to_int32(act_t)
# probabilty of sampled action
prob_shape_t = tf.shape(logp_t)
action_idx = tf.range(prob_shape_t[0]) * prob_shape_t[1] + act_t
act_logp_t = tf.gather(tf.reshape(logp_t, [-1]), action_idx)
return hid_t, state_t, act_t, act_logp_t
hiddens, states, actions, act_logps = [], [], [], []
# core computational graph
with tf.variable_scope(self.scope) as dec_scope:
for step_idx in range(self.max_len):
# recurrent parameter share
if step_idx > 0:
dec_scope.reuse_variables()
# fetch step func arguments
state_tm = states[step_idx-1] if step_idx > 0 else init_state
in_idx_t = input_indices[step_idx]
vd_idx_t = valid_indices[step_idx]
lt_idx_t = left_indices[step_idx]
rt_idx_t = right_indices[step_idx]
mask_t = valid_masks[step_idx] if valid_masks is not None else None
# call step func
hid_t, state_t, act_t, act_prob_t = step(state_tm, in_idx_t, vd_idx_t, lt_idx_t, rt_idx_t, mask_t)
# store step func returns
hiddens.append(hid_t)
states.append(state_t)
actions.append(act_t)
act_logps.append(act_prob_t)
return hiddens, actions, act_logps
| 53.7
| 120
| 0.584069
| 2,193
| 16,647
| 4.124487
| 0.087551
| 0.012383
| 0.037369
| 0.048867
| 0.895633
| 0.875069
| 0.864677
| 0.85738
| 0.845992
| 0.834936
| 0
| 0.01277
| 0.317895
| 16,647
| 309
| 121
| 53.873786
| 0.783796
| 0.145732
| 0
| 0.832487
| 0
| 0
| 0.030928
| 0
| 0
| 0
| 0
| 0.003236
| 0
| 1
| 0.030457
| false
| 0
| 0.020305
| 0
| 0.081218
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0bd8259276b6336224ad4646308ba1071355ef97
| 346
|
py
|
Python
|
graphene_django_extras/filters/__init__.py
|
tinyjin/graphene-django-extras
|
905707426c364b01ba6ee173ec1a8cc42000b636
|
[
"MIT"
] | 409
|
2017-09-20T20:52:43.000Z
|
2022-03-16T20:01:52.000Z
|
graphene_django_extras/filters/__init__.py
|
tinyjin/graphene-django-extras
|
905707426c364b01ba6ee173ec1a8cc42000b636
|
[
"MIT"
] | 149
|
2017-09-30T20:48:05.000Z
|
2022-03-17T17:11:01.000Z
|
graphene_django_extras/filters/__init__.py
|
tinyjin/graphene-django-extras
|
905707426c364b01ba6ee173ec1a8cc42000b636
|
[
"MIT"
] | 106
|
2017-09-30T20:51:45.000Z
|
2022-02-21T19:16:32.000Z
|
# -*- coding: utf-8 -*-
from .lookups import (
ALL_LOOKUPS,
BASIC_LOOKUPS,
COMMON_LOOKUPS,
NUMBER_LOOKUPS,
DATETIME_LOOKUPS,
DATE_LOOKUPS,
TIME_LOOKUPS,
)
__all__ = (
"ALL_LOOKUPS",
"BASIC_LOOKUPS",
"COMMON_LOOKUPS",
"NUMBER_LOOKUPS",
"DATETIME_LOOKUPS",
"DATE_LOOKUPS",
"TIME_LOOKUPS",
)
| 16.47619
| 23
| 0.630058
| 35
| 346
| 5.714286
| 0.371429
| 0.1
| 0.15
| 0.22
| 0.85
| 0.85
| 0.85
| 0.85
| 0.85
| 0.85
| 0
| 0.003831
| 0.245665
| 346
| 20
| 24
| 17.3
| 0.762452
| 0.060694
| 0
| 0
| 0
| 0
| 0.28483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0beef6761473afe128a67d9b5fd2ba993b61f119
| 163
|
py
|
Python
|
RL/jax_rl/datasets/__init__.py
|
mfinzi/residual-pathway-priors
|
f1b1910bd9cb69f3d6121fdb9b68e82d55db9983
|
[
"BSD-2-Clause"
] | 9
|
2021-11-23T18:21:57.000Z
|
2022-02-10T06:29:21.000Z
|
RL/jax_rl/datasets/__init__.py
|
mfinzi/residual-pathway-priors
|
f1b1910bd9cb69f3d6121fdb9b68e82d55db9983
|
[
"BSD-2-Clause"
] | null | null | null |
RL/jax_rl/datasets/__init__.py
|
mfinzi/residual-pathway-priors
|
f1b1910bd9cb69f3d6121fdb9b68e82d55db9983
|
[
"BSD-2-Clause"
] | null | null | null |
from jax_rl.datasets.dataset import Batch
from jax_rl.datasets.dataset_utils import make_env_and_dataset
from jax_rl.datasets.replay_buffer import ReplayBuffer
| 40.75
| 63
| 0.871166
| 26
| 163
| 5.153846
| 0.538462
| 0.156716
| 0.201493
| 0.380597
| 0.358209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092025
| 163
| 3
| 64
| 54.333333
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0401fbb7e6c69b00415c5aa81f30752bb8a6f111
| 1,090
|
py
|
Python
|
menpo/image/test/image_blank_test.py
|
yuxiang-zhou/menpo
|
01deaf3808cbe7a3d9db5542ac9d9f53cd81743a
|
[
"BSD-3-Clause"
] | 1
|
2021-04-20T00:36:57.000Z
|
2021-04-20T00:36:57.000Z
|
menpo/image/test/image_blank_test.py
|
yuxiang-zhou/menpo
|
01deaf3808cbe7a3d9db5542ac9d9f53cd81743a
|
[
"BSD-3-Clause"
] | 1
|
2019-03-09T16:01:46.000Z
|
2019-03-09T16:01:46.000Z
|
menpo/image/test/image_blank_test.py
|
yuxiang-zhou/menpo
|
01deaf3808cbe7a3d9db5542ac9d9f53cd81743a
|
[
"BSD-3-Clause"
] | 1
|
2020-05-01T09:55:57.000Z
|
2020-05-01T09:55:57.000Z
|
import numpy as np
from menpo.image import *
def test_blank_1_channel_image():
mask = np.zeros((10, 10), dtype=np.bool)
im = MaskedImage.init_blank((10, 10), mask=mask)
assert np.all(im.pixels == 0.0)
assert im.n_channels == 1
assert np.all(im.mask.pixels == 0.0)
im = MaskedImage.init_blank((10, 10), fill=0.5)
assert np.all(im.pixels == 0.5)
def test_blank_3_channel_image():
mask = np.zeros((10, 10), dtype=np.bool)
im = MaskedImage.init_blank((10, 10), mask=mask, n_channels=3)
assert np.all(im.pixels == 0.0)
assert im.n_channels == 3
assert np.all(im.mask.pixels == 0.0)
im = MaskedImage.init_blank((10, 10), fill=0.5, n_channels=3)
assert np.all(im.pixels == 0.5)
def test_blank_maskedimage():
mask = np.zeros((10, 10), dtype=np.bool)
im = MaskedImage.init_blank((10, 10), mask=mask, n_channels=10)
assert np.all(im.pixels == 0.0)
assert im.n_channels == 10
assert np.all(im.mask.pixels == 0.0)
im = MaskedImage.init_blank((10, 10), fill=2.0, n_channels=10)
assert np.all(im.pixels == 2.0)
| 30.277778
| 67
| 0.646789
| 192
| 1,090
| 3.552083
| 0.15625
| 0.052786
| 0.145161
| 0.171554
| 0.904692
| 0.904692
| 0.904692
| 0.900293
| 0.853372
| 0.837243
| 0
| 0.081264
| 0.187156
| 1,090
| 35
| 68
| 31.142857
| 0.688488
| 0
| 0
| 0.423077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.461538
| 1
| 0.115385
| false
| 0
| 0.076923
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0459993ca1db5816fa8cdee86ccc9fc908dd0619
| 68,377
|
py
|
Python
|
src/analysis/draw_custom_proposals.py
|
haoli-buaa/faster-rcnn-scenarios
|
d1086f39ee7fe7c0e4720c20861abee9980bd018
|
[
"MIT"
] | 14
|
2017-05-23T03:21:27.000Z
|
2021-01-18T10:31:54.000Z
|
src/analysis/draw_custom_proposals.py
|
haoli-buaa/faster-rcnn-scenarios
|
d1086f39ee7fe7c0e4720c20861abee9980bd018
|
[
"MIT"
] | 3
|
2017-11-23T03:36:15.000Z
|
2019-05-12T21:17:56.000Z
|
src/analysis/draw_custom_proposals.py
|
djdam/faster-rcnn-scenarios
|
d1086f39ee7fe7c0e4720c20861abee9980bd018
|
[
"MIT"
] | 12
|
2017-06-02T01:35:15.000Z
|
2020-08-17T06:22:54.000Z
|
#!/usr/bin/env python
import cPickle
import numpy as np
from bbox_helper import BBoxHelper
import sys
from os.path import dirname, join, basename
import cv2
this_dir = dirname(__file__)
if __name__ == '__main__':
sys.path.insert(0, join(this_dir,'..'))
import _init_paths
import os
os.chdir(_init_paths.faster_rcnn_root)
from datasets.factory import get_imdb
IMAGES_TO_ANNOTATE = 25
train_gt_roidb_pkl_file='/home/dennis/workspace/faster-rcnn-scenarios/src/train__gt_roidb.pkl'
cache=cPickle.load(open(train_gt_roidb_pkl_file, 'r'))
filenames=[
'./data/technicaldrawings/numbers/annotations/JamesBell-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal386.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal130.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/WilliamMorris-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1323.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_150.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_513.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_13.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MissDestinyWagnerDVM-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde59.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo450.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1302.pdf.xml',
'./data/technicaldrawings/numbers/annotations/BrandyTaylor-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo634.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1310.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1375.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_119.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_29.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo159.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_9.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_106.pdf.xml',
'./data/technicaldrawings/numbers/annotations/SarahLawson-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1315.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_16.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_593.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal134.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal136.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal125.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal140.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1322.pdf.xml',
'./data/technicaldrawings/numbers/annotations/DylanSantiago-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde28.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_163.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_182.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_524.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_515.pdf.xml',
'./data/technicaldrawings/numbers/annotations/sluis13.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo446.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/ValerieBautista-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_295.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo448.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/JenniferPoole-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_70.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde12.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_20.pdf.xml',
'./data/technicaldrawings/numbers/annotations/AdamSantos-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_46.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/KimberlyStewart-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_2.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_231.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_299.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_584.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_521.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo461.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo114.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/veere_596.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_201.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ChadSmith-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_213.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_132.pdf.xml',
'./data/technicaldrawings/numbers/annotations/AlejandroPowell-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_337.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_537.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_339.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_488.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo113.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_8.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_108.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RyanGrant-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_110.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_1011.pdf.xml',
'./data/technicaldrawings/numbers/annotations/CoreyJohnston-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde1398.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal0.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo478.pdf.xml',
'./data/technicaldrawings/numbers/annotations/AndreaRoberts-0.xml',
'./data/technicaldrawings/numbers/annotations/DennisTorres-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal100.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_598.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1303.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde42.pdf.xml',
'./data/technicaldrawings/numbers/annotations/StanleyMelton-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_101.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ChristinaOconnor-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde59.pdf-3.xml',
'./data/technicaldrawings/numbers/annotations/veere_280.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/sluis60.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/MargaretScott-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_176.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_24.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_541.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1379.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_195.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde135.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde39.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_240.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JosephGuerra-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_62.pdf.xml',
'./data/technicaldrawings/numbers/annotations/NicoleKirk-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_217.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_38.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal124.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/sluis17.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_491.pdf.xml',
'./data/technicaldrawings/numbers/annotations/FeliciaWilliams-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo145.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1055.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1701.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde6.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_181.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1053.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/sluis50.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_49.pdf.xml',
'./data/technicaldrawings/numbers/annotations/sluis61.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1329.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde67.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal10006.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo670.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1357.pdf.xml',
'./data/technicaldrawings/numbers/annotations/NicoleRoberts-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_1000.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal126.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_503.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde145.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_123.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_0.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_159.pdf.xml',
'./data/technicaldrawings/numbers/annotations/SaraLawrence-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal304.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_128.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_550.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_232.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/RyanSuarez-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1350.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_36.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal137.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo150.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1324.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RogerRoberts-0.xml',
'./data/technicaldrawings/numbers/annotations/RobertHawkins-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal354.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo655.pdf.xml',
'./data/technicaldrawings/numbers/annotations/AdamLeonard-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_60.pdf.xml',
'./data/technicaldrawings/numbers/annotations/DanielAnderson-0.xml',
'./data/technicaldrawings/numbers/annotations/JenniferYates-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde143.pdf.xml',
'./data/technicaldrawings/numbers/annotations/AmandaBarry-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_238.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1383.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_136.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde0.pdf.xml',
'./data/technicaldrawings/numbers/annotations/KariPerez-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1339.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_545.pdf.xml',
'./data/technicaldrawings/numbers/annotations/HenryCrawford-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde59.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1697.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal350.pdf.xml',
'./data/technicaldrawings/numbers/annotations/StacyRivera-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_470.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1309.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_212.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_162.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_204.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1360.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/CindyMoore-0.xml',
'./data/technicaldrawings/numbers/annotations/ChristinaMcdonald-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal138.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_19.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1369.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MatthewKim-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_570.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo456.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1053.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/LauraRosario-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_1.pdf.xml',
'./data/technicaldrawings/numbers/annotations/AmandaBowen-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_287.pdf.xml',
'./data/technicaldrawings/numbers/annotations/NicoleWebster-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1381.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_572.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_40.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde41.pdf.xml',
'./data/technicaldrawings/numbers/annotations/EdwardJohnson-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde1391.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde29.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MatthewHardin-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo141.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_1005.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_134.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal133.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_38.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_517.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal125.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal139.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_330.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal135.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_10.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_234.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1374.pdf.xml',
'./data/technicaldrawings/numbers/annotations/CarrieRussell-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_74.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ErikMcgrath-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_304.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde40.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1319.pdf.xml',
'./data/technicaldrawings/numbers/annotations/DavidHale-0.xml',
'./data/technicaldrawings/numbers/annotations/RaymondAllen-0.xml',
'./data/technicaldrawings/numbers/annotations/MirandaCook-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde76.pdf.xml',
'./data/technicaldrawings/numbers/annotations/VanessaOwensMD-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1023.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_549.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal111.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1707.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_538.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde34.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JenniferMosley-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_539.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal383.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JoseRodriguez-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_564.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_40.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/KennethIrwin-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_481.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MichelleAnderson-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_563.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RobertReeves-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_197.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ThomasBray-0.xml',
'./data/technicaldrawings/numbers/annotations/AngelaEwing-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde58.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal303.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/DanielPierce-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo452.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MichaelReynolds-0.xml',
'./data/technicaldrawings/numbers/annotations/CalebChen-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo138.pdf.xml',
'./data/technicaldrawings/numbers/annotations/SherylBarnes-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_67.pdf.xml',
'./data/technicaldrawings/numbers/annotations/HeatherCollins-0.xml',
'./data/technicaldrawings/numbers/annotations/sluis59.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1372.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_11.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_577.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_17.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_1006.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_487.pdf.xml',
'./data/technicaldrawings/numbers/annotations/LauraWalker-0.xml',
'./data/technicaldrawings/numbers/annotations/HeatherPhillips-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_516.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_193.pdf.xml',
'./data/technicaldrawings/numbers/annotations/AndreaNorris-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_184.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1354.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1328.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MorganAlvarez-0.xml',
'./data/technicaldrawings/numbers/annotations/PatriciaRice-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_200.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde1016.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1326.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo147.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_505.pdf.xml',
'./data/technicaldrawings/numbers/annotations/StephanieMorrison-0.xml',
'./data/technicaldrawings/numbers/annotations/KevinNguyen-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1368.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_65.pdf.xml',
'./data/technicaldrawings/numbers/annotations/SaraHutchinson-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde72.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1695.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_170.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_285.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1380.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo114.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_41.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_12.pdf.xml',
'./data/technicaldrawings/numbers/annotations/KristinWilson-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_288.pdf.xml',
'./data/technicaldrawings/numbers/annotations/KatherineWilliams-0.xml',
'./data/technicaldrawings/numbers/annotations/MatthewGarcia-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde135.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_242.pdf.xml',
'./data/technicaldrawings/numbers/annotations/EdwardThompson-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo114.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1346.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1291.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo134.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_42.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo454.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MelanieConway-0.xml',
'./data/technicaldrawings/numbers/annotations/JulieBest-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo458.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_190.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1698.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_149.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_4.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ThomasJames-0.xml',
'./data/technicaldrawings/numbers/annotations/ClaudiaJohnson-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde2.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/GregoryJohnson-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_255.pdf.xml',
'./data/technicaldrawings/numbers/annotations/AmandaRoberts-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo638.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_540.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_509.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_187.pdf.xml',
'./data/technicaldrawings/numbers/annotations/sluis12.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_152.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal368.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_160.pdf.xml',
'./data/technicaldrawings/numbers/annotations/CynthiaJohnson-0.xml',
'./data/technicaldrawings/numbers/annotations/sluis18.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/Mrs.CynthiaFisherMD-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_135.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_16.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal355.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde44.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_507.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal117.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal312.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo136.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_124.pdf.xml',
'./data/technicaldrawings/numbers/annotations/StevenBaker-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_20.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1013.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ElizabethThomas-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo449.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo451.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal136.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_14.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_69.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/LoriWilkinson-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal344.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1367.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_226.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_457.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_1004.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1325.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_1003.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_548.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_40.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal134.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/GregoryMeyer-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_579.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_494.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_580.pdf.xml',
'./data/technicaldrawings/numbers/annotations/NicoleWilliams-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_246.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1384.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_103.pdf.xml',
'./data/technicaldrawings/numbers/annotations/PaulMay-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo653.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_480.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JasonMiller-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_256.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_298.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1298.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo448.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_203.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal138.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_51.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_36.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MorganSnyder-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1297.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MaryJames-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal354.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/ErinDudley-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_230.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_484.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo140.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_104.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_137.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_9.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal115.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_106.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal126.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1377.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_260.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_258.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_336.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MarkNunez-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_126.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_37.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_237.pdf.xml',
'./data/technicaldrawings/numbers/annotations/sluis19.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_111.pdf.xml',
'./data/technicaldrawings/numbers/annotations/TimothyMedina-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_581.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JeffreyCarroll-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_1008.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_556.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ShirleyRomero-0.xml',
'./data/technicaldrawings/numbers/annotations/LarryMorris-0.xml',
'./data/technicaldrawings/numbers/annotations/sluis48.pdf.xml',
'./data/technicaldrawings/numbers/annotations/EricWilliams-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_30.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1314.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_22.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/JohnGomezMD-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_48.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_300.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_155.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JessicaBoone-0.xml',
'./data/technicaldrawings/numbers/annotations/ThomasDuncan-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal127.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_154.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_29.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo639.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_578.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RebeccaBarker-0.xml',
'./data/technicaldrawings/numbers/annotations/KristieMunoz-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo155.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal293.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal351.pdf.xml',
'./data/technicaldrawings/numbers/annotations/sluis62.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/ChristopherVazquez-0.xml',
'./data/technicaldrawings/numbers/annotations/WayneNavarro-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1298.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/BrendaWeber-0.xml',
'./data/technicaldrawings/numbers/annotations/AngelWu-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal0.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/SharonAllen-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde6.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/veere_560.pdf.xml',
'./data/technicaldrawings/numbers/annotations/PaulReed-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_229.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_57.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RonaldPham-0.xml',
'./data/technicaldrawings/numbers/annotations/MistyFrancis-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_496.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ShannonLee-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1364.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_125.pdf.xml',
'./data/technicaldrawings/numbers/annotations/NancyWilson-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1300.pdf.xml',
'./data/technicaldrawings/numbers/annotations/WilliamByrd-0.xml',
'./data/technicaldrawings/numbers/annotations/StevenPoole-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1355.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde2.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_297.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_457.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde30.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1365.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1366.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/JeremyJohnson-0.xml',
'./data/technicaldrawings/numbers/annotations/StephanieAllen-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_249.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal128.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_5.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal122.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/ClaudiaHuff-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1359.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_555.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_508.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_130.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_166.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_305.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_459.pdf.xml',
'./data/technicaldrawings/numbers/annotations/EricStevens-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_535.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_548.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal10000.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JoshuaFox-0.xml',
'./data/technicaldrawings/numbers/annotations/EdwinFoley-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1340.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_251.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_582.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_44.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal336.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo636.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1698.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo113.pdf-3.xml',
'./data/technicaldrawings/numbers/annotations/DianeJames-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde4.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_153.pdf.xml',
'./data/technicaldrawings/numbers/annotations/StephanieMartinDVM-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_164.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_254.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/JoseHawkins-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_272.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_27.pdf.xml',
'./data/technicaldrawings/numbers/annotations/LisaVasquez-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_485.pdf.xml',
'./data/technicaldrawings/numbers/annotations/DavidAllen-0.xml',
'./data/technicaldrawings/numbers/annotations/SharonRichards-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_576.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde13.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_290.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1356.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_37.pdf.xml',
'./data/technicaldrawings/numbers/annotations/TeresaJones-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_603.pdf.xml',
'./data/technicaldrawings/numbers/annotations/CarlColeMD-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_551.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal330.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_523.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_39.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MaryCox-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal127.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_215.pdf.xml',
'./data/technicaldrawings/numbers/annotations/VictorHurst-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_102.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JackWade-0.xml',
'./data/technicaldrawings/numbers/annotations/RodneyHerreraMD-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_307.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1306.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1000.pdf.xml',
'./data/technicaldrawings/numbers/annotations/KimberlyJohnson-0.xml',
'./data/technicaldrawings/numbers/annotations/ShirleyRomero-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_511.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_600.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_39.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde59.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/sluis58.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_552.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_183.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1312.pdf.xml',
'./data/technicaldrawings/numbers/annotations/SamuelHall-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_506.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_527.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_536.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_221.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_21.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_19.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/LisaWest-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_47.pdf.xml',
'./data/technicaldrawings/numbers/annotations/KimberlyAguirre-0.xml',
'./data/technicaldrawings/numbers/annotations/BrendanRamos-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_133.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1320.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_109.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_602.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RhondaKelly-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_291.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal295.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_544.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_253.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_465.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1358.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1343.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_497.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ChadHamilton-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_530.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal133.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1709.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_277.pdf.xml',
'./data/technicaldrawings/numbers/annotations/TylerWeeks-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_559.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_13.pdf.xml',
'./data/technicaldrawings/numbers/annotations/VictoriaWilliams-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_248.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_1001.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_69.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal139.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/DanielStevenson-0.xml',
'./data/technicaldrawings/numbers/annotations/DonaldCollins-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_175.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde7.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_28.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_219.pdf.xml',
'./data/technicaldrawings/numbers/annotations/Mrs.ChristinePhillipsMD-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_567.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_302.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo462.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_473.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_118.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal341.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_127.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_174.pdf.xml',
'./data/technicaldrawings/numbers/annotations/TonyJackson-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal10005.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_472.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_7.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde1393.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_194.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_1012.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde35.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde74.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo460.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_4.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_151.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JamesWright-0.xml',
'./data/technicaldrawings/numbers/annotations/StevenClay-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal104.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_587.pdf.xml',
'./data/technicaldrawings/numbers/annotations/sluis22.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/sluis49.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo455.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RebeccaHanson-0.xml',
'./data/technicaldrawings/numbers/annotations/IanTodd-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde6.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal325.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/sluis20.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_61.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde3.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/CindyMoore-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1330.pdf.xml',
'./data/technicaldrawings/numbers/annotations/StephenPowell-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo445.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_1007.pdf.xml',
'./data/technicaldrawings/numbers/annotations/LoriParker-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_308.pdf.xml',
'./data/technicaldrawings/numbers/annotations/NicholasRivera-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_101.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_519.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RodneyJoseph-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1362.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_20.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo466.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MichaelRobles-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_2.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal137.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo114.pdf-3.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal357.pdf.xml',
'./data/technicaldrawings/numbers/annotations/LindseyMelton-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_26.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_469.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_189.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_100.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde131.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_504.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo115.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/sluis58.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/veere_185.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_211.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_100.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_483.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JoshuaCarroll-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_490.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JustinVilla-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo669.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo146.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_495.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_19.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1025.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde147.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_565.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_482.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_20.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_502.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_15.pdf.xml',
'./data/technicaldrawings/numbers/annotations/AmyRoberts-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_586.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1321.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_531.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal135.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_192.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde27.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1316.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/SallyWood-0.xml',
'./data/technicaldrawings/numbers/annotations/NancyColon-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal130.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1696.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/MichelleCasey-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_1002.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo648.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal376.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal333.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1699.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_568.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_306.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_302.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_199.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1696.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_281.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_3.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_63.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1333.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_259.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal116.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1348.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde3.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1344.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1382.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RhondaKelly-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1313.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_137.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde36.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_186.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal365.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1318.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_116.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_11.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1376.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_27.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_528.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_594.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_31.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_461.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal295.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_75.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_107.pdf.xml',
'./data/technicaldrawings/numbers/annotations/sluis42.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde30.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_575.pdf.xml',
'./data/technicaldrawings/numbers/annotations/CalvinGarner-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_5.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_1010.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal124.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde141.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ChristopherJohnson-0.xml',
'./data/technicaldrawings/numbers/annotations/CalvinNolan-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1311.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal132.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1014.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde43.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo652.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JeffreyMaddox-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_178.pdf.xml',
'./data/technicaldrawings/numbers/annotations/KellyBrown-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_26.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal122.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/KarenSmith-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_447.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_46.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/veere_542.pdf.xml',
'./data/technicaldrawings/numbers/annotations/DanielCampbell-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde63.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal112.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_15.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JanetRobinson-0.xml',
'./data/technicaldrawings/numbers/annotations/MatthewLeonardJr.-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde130.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1380.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_71.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_107.pdf.xml',
'./data/technicaldrawings/numbers/annotations/AmandaBell-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_228.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_289.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_72.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/AliciaBrown-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde58.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1375.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_227.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_102.pdf.xml',
'./data/technicaldrawings/numbers/annotations/SamanthaKelley-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_3.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal348.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_14.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_52.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_493.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_21.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_294.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_529.pdf.xml',
'./data/technicaldrawings/numbers/annotations/sluis51.pdf.xml',
'./data/technicaldrawings/numbers/annotations/DerrickHill-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1702.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde33.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_257.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal116.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde66.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo445.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/ChristyAllen-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_63.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/GregDavenport-0.xml',
'./data/technicaldrawings/numbers/annotations/sluis61.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/veere_546.pdf.xml',
'./data/technicaldrawings/numbers/annotations/CarrieSanchez-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_599.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_6.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1699.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_218.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_202.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_161.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_100.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1373.pdf.xml',
'./data/technicaldrawings/numbers/annotations/SonyaFitzgerald-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde61.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1378.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1323.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_168.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1708.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_6.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo469.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1706.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_518.pdf.xml',
'./data/technicaldrawings/numbers/annotations/BrandonHowell-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_14.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_588.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_532.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo446.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/veere_131.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1370.pdf.xml',
'./data/technicaldrawings/numbers/annotations/sluis25.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1317.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_283.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo115.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/LisaDavenport-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_471.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1360.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/BarbaraYu-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_177.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo468.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1705.pdf.xml',
'./data/technicaldrawings/numbers/annotations/sluis84.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_111.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo446.pdf-3.xml',
'./data/technicaldrawings/numbers/annotations/WillieDrake-0.xml',
'./data/technicaldrawings/numbers/annotations/ManuelAllen-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_547.pdf.xml',
'./data/technicaldrawings/numbers/annotations/NicoleWebster-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal112.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/JamesPowell-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_53.pdf.xml',
'./data/technicaldrawings/numbers/annotations/CoreyHoover-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_72.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1349.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde1027.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal325.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal140.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1054.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_254.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_557.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_17.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_169.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo139.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo113.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_118.pdf.xml',
'./data/technicaldrawings/numbers/annotations/KylePalmerMD-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_156.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/KatherineGraham-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1706.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_68.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_525.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_252.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_137.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_583.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1371.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_22.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1298.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/veere_514.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MathewEspinoza-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_72.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1700.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/TerryKing-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1360.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_10.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde61.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal388.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_589.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RobertRodgers-0.xml',
'./data/technicaldrawings/numbers/annotations/ReginaldPerez-0.xml',
'./data/technicaldrawings/numbers/annotations/MarissaRivera-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_492.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1341.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal293.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_591.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_486.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1002.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_12.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo152.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde1028.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_526.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_210.pdf.xml',
'./data/technicaldrawings/numbers/annotations/PaulaStevens-0.xml',
'./data/technicaldrawings/numbers/annotations/sluis75.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1004.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MelanieMacdonald-0.xml',
'./data/technicaldrawings/numbers/annotations/JessicaRoman-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_512.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MichelleGriffith-0.xml',
'./data/technicaldrawings/numbers/annotations/StephanieCunningham-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_534.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1301.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo475.pdf.xml',
'./data/technicaldrawings/numbers/annotations/BryceShaffer-0.xml',
'./data/technicaldrawings/numbers/annotations/ChristineRose-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_15.pdf.xml',
'./data/technicaldrawings/numbers/annotations/WilliePeters-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_522.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_157.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde1389.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_303.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_222.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_520.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1707.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1331.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo144.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal378.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo444.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal123.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal303.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/JessicaGomez-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_19.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_585.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_196.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_110.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_188.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_569.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1305.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_18.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_301.pdf.xml',
'./data/technicaldrawings/numbers/annotations/ElaineWu-0.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo634.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde2.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_22.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde14.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo143.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal123.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_109.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_573.pdf.xml',
'./data/technicaldrawings/numbers/annotations/borger-odoorn_448.pdf.xml',
'./data/technicaldrawings/numbers/annotations/DonaldCline-0.xml',
'./data/technicaldrawings/numbers/annotations/EthanOlsen-0.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1295.pdf.xml',
'./data/technicaldrawings/numbers/annotations/VictoriaOneal-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal108.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_191.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal371.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_261.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_296.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_590.pdf.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde35.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo153.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_230.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/RebeccaValdez-0.xml',
'./data/technicaldrawings/numbers/annotations/Dr.DeannaWalters-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal1047.pdf-2.xml',
'./data/technicaldrawings/numbers/annotations/veere_179.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_1009.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo459.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_114.pdf.xml',
'./data/technicaldrawings/numbers/annotations/MatthewHicks-0.xml',
'./data/technicaldrawings/numbers/annotations/zeewolde7.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_562.pdf.xml',
'./data/technicaldrawings/numbers/annotations/RhondaGreenMD-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_13.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo158.pdf.xml',
'./data/technicaldrawings/numbers/annotations/JessicaHarper-0.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal131.pdf.xml',
'./data/technicaldrawings/numbers/annotations/steenwijkerland_1311.pdf-1.xml',
'./data/technicaldrawings/numbers/annotations/veere_22.pdf-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_501.pdf.xml',
'./data/technicaldrawings/numbers/annotations/TinaRandall-0.xml',
'./data/technicaldrawings/numbers/annotations/veere_574.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_595.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_250.pdf.xml',
'./data/technicaldrawings/numbers/annotations/tynaarlo453.pdf.xml',
'./data/technicaldrawings/numbers/annotations/veere_223.pdf.xml',
'./data/technicaldrawings/numbers/annotations/stadskanaal113.pdf.xml',
]
filenames=[join(_init_paths.faster_rcnn_root, f) for f in filenames]
imdb=get_imdb("technicaldrawings_numbers_train")
roi_db=imdb.gt_roidb()
# filenames = ['JamesBell-0.jpg', 'stadskanaal386.pdf.xml',',',',',', 'stadskanaal130.pdf-1.xml',',',',', 'WilliamMorris-0.xml',',',',',
# 'steenwijkerland_1323.pdf-1.xml',',',',']
def getBasenameNoExt(filename):
return os.path.splitext(basename(filename))[0]
entry_dict={}
for roi_entry in roi_db:
entry_dict[getBasenameNoExt(roi_entry['filename'])]=roi_entry
im_root='/home/dennis/workspace/py-faster-rcnn/data/technicaldrawings/numbers/images/'
for im_idx in range(0,340):
scores=[]
def thresh(bboxes, scores, th):
# unknown_inds = scores < 0
# bg_inds = (scores >= 0) & (scores <= 0.5)
fg_inds = scores > th
return bboxes[fg_inds], scores[fg_inds]
def loadFinalProposals():
data=cPickle.load(file('/home/dennis/workspace/faster-rcnn-scenarios/private/scenarios/feat_stride_8/output/final_proposals_%d.pkl' % im_idx,'r'))
scores=np.array(data[2])[:,1]
boxes=data[0]
im_info=data[1]
boxes, scores = thresh(boxes, scores, 0.2)
return boxes, scores, im_info
def loadRpnProposals():
data=cPickle.load(file('/home/dennis/workspace/faster-rcnn-scenarios/private/scenarios/feat_stride_8/output/proposals_%d.pkl' % im_idx,'r'));
print data
im_info, boxes, scores = data
boxes=np.array(boxes)
scores = np.array(scores).flatten()
print boxes
print scores
boxes, scores = thresh(boxes, scores, 0.98)
return boxes, scores, im_info
def save(boxes, scores, im_info, infix='proposals'):
im_entry=entry_dict[getBasenameNoExt(filenames[im_idx])]
bbox_helper = BBoxHelper(im_entry, join(im_root, filenames[im_idx]), im_info)
# bboxes=[ [x1,y1,x2,y2] for y1,x1, y2, x2 in bboxes]
prefix = prefix = "{0:0>4}_".format(im_idx) + "_" + infix
bbox_helper.saveBoundingBoxesToImage(boxes,'/home/dennis/workspace/faster-rcnn-scenarios/src/analysis/output', scores, prefix)
boxes, scores, im_info=loadFinalProposals()
# save(proposals, 'proposals')
save(boxes, scores, im_info, 'final_proposals')
boxes, scores, im_info=loadRpnProposals()
save(boxes, scores, im_info, 'rpn_proposals')
| 66.000965
| 154
| 0.817497
| 7,735
| 68,377
| 7.152941
| 0.120362
| 0.409051
| 0.476721
| 0.663299
| 0.879302
| 0.876247
| 0.674667
| 0.253832
| 0.046089
| 0.01023
| 0
| 0.038711
| 0.021133
| 68,377
| 1,035
| 155
| 66.064734
| 0.787919
| 0.00525
| 0
| 0.002002
| 0
| 0.003003
| 0.914264
| 0.913279
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.009009
| null | null | 0.003003
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
045b93d393f980d41f237b9ad66603a982a5b3c5
| 42,755
|
py
|
Python
|
sdk/python/pulumi_cloudflare/load_balancer.py
|
pulumi/pulumi-cloudflare
|
d444af2fab6101b388a15cf2e3933e45e9935cc6
|
[
"ECL-2.0",
"Apache-2.0"
] | 35
|
2019-03-14T21:29:29.000Z
|
2022-03-30T00:00:59.000Z
|
sdk/python/pulumi_cloudflare/load_balancer.py
|
pulumi/pulumi-cloudflare
|
d444af2fab6101b388a15cf2e3933e45e9935cc6
|
[
"ECL-2.0",
"Apache-2.0"
] | 128
|
2019-03-08T23:45:58.000Z
|
2022-03-31T21:05:22.000Z
|
sdk/python/pulumi_cloudflare/load_balancer.py
|
pulumi/pulumi-cloudflare
|
d444af2fab6101b388a15cf2e3933e45e9935cc6
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2019-05-10T12:52:56.000Z
|
2020-03-24T15:02:14.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['LoadBalancerArgs', 'LoadBalancer']
@pulumi.input_type
class LoadBalancerArgs:
def __init__(__self__, *,
default_pool_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
fallback_pool_id: pulumi.Input[str],
name: pulumi.Input[str],
zone_id: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
pop_pools: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerPopPoolArgs']]]] = None,
proxied: Optional[pulumi.Input[bool]] = None,
region_pools: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRegionPoolArgs']]]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRuleArgs']]]] = None,
session_affinity: Optional[pulumi.Input[str]] = None,
session_affinity_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
session_affinity_ttl: Optional[pulumi.Input[int]] = None,
steering_policy: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a LoadBalancer resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] default_pool_ids: A list of pool IDs ordered by their failover priority. Used whenever region/pop pools are not defined.
:param pulumi.Input[str] fallback_pool_id: The pool ID to use when all other pools are detected as unhealthy.
:param pulumi.Input[str] name: Human readable name for this rule.
:param pulumi.Input[str] zone_id: The zone ID to add the load balancer to.
:param pulumi.Input[str] description: Free text description.
:param pulumi.Input[bool] enabled: Enable or disable the load balancer. Defaults to `true` (enabled).
:param pulumi.Input[Sequence[pulumi.Input['LoadBalancerPopPoolArgs']]] pop_pools: See pop_pools above.
:param pulumi.Input[bool] proxied: Whether the hostname gets Cloudflare's origin protection. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input['LoadBalancerRegionPoolArgs']]] region_pools: See region_pools above.
:param pulumi.Input[Sequence[pulumi.Input['LoadBalancerRuleArgs']]] rules: A list of conditions and overrides for each load balancer operation. See the field documentation below.
:param pulumi.Input[str] session_affinity: See field above.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] session_affinity_attributes: See field above.
:param pulumi.Input[int] session_affinity_ttl: See field above.
:param pulumi.Input[str] steering_policy: See field above.
:param pulumi.Input[int] ttl: See field above.
"""
pulumi.set(__self__, "default_pool_ids", default_pool_ids)
pulumi.set(__self__, "fallback_pool_id", fallback_pool_id)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "zone_id", zone_id)
if description is not None:
pulumi.set(__self__, "description", description)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if pop_pools is not None:
pulumi.set(__self__, "pop_pools", pop_pools)
if proxied is not None:
pulumi.set(__self__, "proxied", proxied)
if region_pools is not None:
pulumi.set(__self__, "region_pools", region_pools)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if session_affinity is not None:
pulumi.set(__self__, "session_affinity", session_affinity)
if session_affinity_attributes is not None:
pulumi.set(__self__, "session_affinity_attributes", session_affinity_attributes)
if session_affinity_ttl is not None:
pulumi.set(__self__, "session_affinity_ttl", session_affinity_ttl)
if steering_policy is not None:
pulumi.set(__self__, "steering_policy", steering_policy)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
@property
@pulumi.getter(name="defaultPoolIds")
def default_pool_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of pool IDs ordered by their failover priority. Used whenever region/pop pools are not defined.
"""
return pulumi.get(self, "default_pool_ids")
@default_pool_ids.setter
def default_pool_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "default_pool_ids", value)
@property
@pulumi.getter(name="fallbackPoolId")
def fallback_pool_id(self) -> pulumi.Input[str]:
"""
The pool ID to use when all other pools are detected as unhealthy.
"""
return pulumi.get(self, "fallback_pool_id")
@fallback_pool_id.setter
def fallback_pool_id(self, value: pulumi.Input[str]):
pulumi.set(self, "fallback_pool_id", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Human readable name for this rule.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="zoneId")
def zone_id(self) -> pulumi.Input[str]:
"""
The zone ID to add the load balancer to.
"""
return pulumi.get(self, "zone_id")
@zone_id.setter
def zone_id(self, value: pulumi.Input[str]):
pulumi.set(self, "zone_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Free text description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Enable or disable the load balancer. Defaults to `true` (enabled).
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="popPools")
def pop_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerPopPoolArgs']]]]:
"""
See pop_pools above.
"""
return pulumi.get(self, "pop_pools")
@pop_pools.setter
def pop_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerPopPoolArgs']]]]):
pulumi.set(self, "pop_pools", value)
@property
@pulumi.getter
def proxied(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the hostname gets Cloudflare's origin protection. Defaults to `false`.
"""
return pulumi.get(self, "proxied")
@proxied.setter
def proxied(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "proxied", value)
@property
@pulumi.getter(name="regionPools")
def region_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRegionPoolArgs']]]]:
"""
See region_pools above.
"""
return pulumi.get(self, "region_pools")
@region_pools.setter
def region_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRegionPoolArgs']]]]):
pulumi.set(self, "region_pools", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRuleArgs']]]]:
"""
A list of conditions and overrides for each load balancer operation. See the field documentation below.
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRuleArgs']]]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter(name="sessionAffinity")
def session_affinity(self) -> Optional[pulumi.Input[str]]:
"""
See field above.
"""
return pulumi.get(self, "session_affinity")
@session_affinity.setter
def session_affinity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "session_affinity", value)
@property
@pulumi.getter(name="sessionAffinityAttributes")
def session_affinity_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
See field above.
"""
return pulumi.get(self, "session_affinity_attributes")
@session_affinity_attributes.setter
def session_affinity_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "session_affinity_attributes", value)
@property
@pulumi.getter(name="sessionAffinityTtl")
def session_affinity_ttl(self) -> Optional[pulumi.Input[int]]:
"""
See field above.
"""
return pulumi.get(self, "session_affinity_ttl")
@session_affinity_ttl.setter
def session_affinity_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "session_affinity_ttl", value)
@property
@pulumi.getter(name="steeringPolicy")
def steering_policy(self) -> Optional[pulumi.Input[str]]:
"""
See field above.
"""
return pulumi.get(self, "steering_policy")
@steering_policy.setter
def steering_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "steering_policy", value)
@property
@pulumi.getter
def ttl(self) -> Optional[pulumi.Input[int]]:
"""
See field above.
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ttl", value)
@pulumi.input_type
class _LoadBalancerState:
def __init__(__self__, *,
created_on: Optional[pulumi.Input[str]] = None,
default_pool_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
fallback_pool_id: Optional[pulumi.Input[str]] = None,
modified_on: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pop_pools: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerPopPoolArgs']]]] = None,
proxied: Optional[pulumi.Input[bool]] = None,
region_pools: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRegionPoolArgs']]]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRuleArgs']]]] = None,
session_affinity: Optional[pulumi.Input[str]] = None,
session_affinity_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
session_affinity_ttl: Optional[pulumi.Input[int]] = None,
steering_policy: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[int]] = None,
zone_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering LoadBalancer resources.
:param pulumi.Input[str] created_on: The RFC3339 timestamp of when the load balancer was created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] default_pool_ids: A list of pool IDs ordered by their failover priority. Used whenever region/pop pools are not defined.
:param pulumi.Input[str] description: Free text description.
:param pulumi.Input[bool] enabled: Enable or disable the load balancer. Defaults to `true` (enabled).
:param pulumi.Input[str] fallback_pool_id: The pool ID to use when all other pools are detected as unhealthy.
:param pulumi.Input[str] modified_on: The RFC3339 timestamp of when the load balancer was last modified.
:param pulumi.Input[str] name: Human readable name for this rule.
:param pulumi.Input[Sequence[pulumi.Input['LoadBalancerPopPoolArgs']]] pop_pools: See pop_pools above.
:param pulumi.Input[bool] proxied: Whether the hostname gets Cloudflare's origin protection. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input['LoadBalancerRegionPoolArgs']]] region_pools: See region_pools above.
:param pulumi.Input[Sequence[pulumi.Input['LoadBalancerRuleArgs']]] rules: A list of conditions and overrides for each load balancer operation. See the field documentation below.
:param pulumi.Input[str] session_affinity: See field above.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] session_affinity_attributes: See field above.
:param pulumi.Input[int] session_affinity_ttl: See field above.
:param pulumi.Input[str] steering_policy: See field above.
:param pulumi.Input[int] ttl: See field above.
:param pulumi.Input[str] zone_id: The zone ID to add the load balancer to.
"""
if created_on is not None:
pulumi.set(__self__, "created_on", created_on)
if default_pool_ids is not None:
pulumi.set(__self__, "default_pool_ids", default_pool_ids)
if description is not None:
pulumi.set(__self__, "description", description)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if fallback_pool_id is not None:
pulumi.set(__self__, "fallback_pool_id", fallback_pool_id)
if modified_on is not None:
pulumi.set(__self__, "modified_on", modified_on)
if name is not None:
pulumi.set(__self__, "name", name)
if pop_pools is not None:
pulumi.set(__self__, "pop_pools", pop_pools)
if proxied is not None:
pulumi.set(__self__, "proxied", proxied)
if region_pools is not None:
pulumi.set(__self__, "region_pools", region_pools)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if session_affinity is not None:
pulumi.set(__self__, "session_affinity", session_affinity)
if session_affinity_attributes is not None:
pulumi.set(__self__, "session_affinity_attributes", session_affinity_attributes)
if session_affinity_ttl is not None:
pulumi.set(__self__, "session_affinity_ttl", session_affinity_ttl)
if steering_policy is not None:
pulumi.set(__self__, "steering_policy", steering_policy)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
if zone_id is not None:
pulumi.set(__self__, "zone_id", zone_id)
@property
@pulumi.getter(name="createdOn")
def created_on(self) -> Optional[pulumi.Input[str]]:
"""
The RFC3339 timestamp of when the load balancer was created.
"""
return pulumi.get(self, "created_on")
@created_on.setter
def created_on(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_on", value)
@property
@pulumi.getter(name="defaultPoolIds")
def default_pool_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of pool IDs ordered by their failover priority. Used whenever region/pop pools are not defined.
"""
return pulumi.get(self, "default_pool_ids")
@default_pool_ids.setter
def default_pool_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "default_pool_ids", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Free text description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Enable or disable the load balancer. Defaults to `true` (enabled).
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="fallbackPoolId")
def fallback_pool_id(self) -> Optional[pulumi.Input[str]]:
"""
The pool ID to use when all other pools are detected as unhealthy.
"""
return pulumi.get(self, "fallback_pool_id")
@fallback_pool_id.setter
def fallback_pool_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fallback_pool_id", value)
@property
@pulumi.getter(name="modifiedOn")
def modified_on(self) -> Optional[pulumi.Input[str]]:
"""
The RFC3339 timestamp of when the load balancer was last modified.
"""
return pulumi.get(self, "modified_on")
@modified_on.setter
def modified_on(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "modified_on", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Human readable name for this rule.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="popPools")
def pop_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerPopPoolArgs']]]]:
"""
See pop_pools above.
"""
return pulumi.get(self, "pop_pools")
@pop_pools.setter
def pop_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerPopPoolArgs']]]]):
pulumi.set(self, "pop_pools", value)
@property
@pulumi.getter
def proxied(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the hostname gets Cloudflare's origin protection. Defaults to `false`.
"""
return pulumi.get(self, "proxied")
@proxied.setter
def proxied(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "proxied", value)
@property
@pulumi.getter(name="regionPools")
def region_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRegionPoolArgs']]]]:
"""
See region_pools above.
"""
return pulumi.get(self, "region_pools")
@region_pools.setter
def region_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRegionPoolArgs']]]]):
pulumi.set(self, "region_pools", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRuleArgs']]]]:
"""
A list of conditions and overrides for each load balancer operation. See the field documentation below.
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerRuleArgs']]]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter(name="sessionAffinity")
def session_affinity(self) -> Optional[pulumi.Input[str]]:
"""
See field above.
"""
return pulumi.get(self, "session_affinity")
@session_affinity.setter
def session_affinity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "session_affinity", value)
@property
@pulumi.getter(name="sessionAffinityAttributes")
def session_affinity_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
See field above.
"""
return pulumi.get(self, "session_affinity_attributes")
@session_affinity_attributes.setter
def session_affinity_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "session_affinity_attributes", value)
@property
@pulumi.getter(name="sessionAffinityTtl")
def session_affinity_ttl(self) -> Optional[pulumi.Input[int]]:
"""
See field above.
"""
return pulumi.get(self, "session_affinity_ttl")
@session_affinity_ttl.setter
def session_affinity_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "session_affinity_ttl", value)
@property
@pulumi.getter(name="steeringPolicy")
def steering_policy(self) -> Optional[pulumi.Input[str]]:
"""
See field above.
"""
return pulumi.get(self, "steering_policy")
@steering_policy.setter
def steering_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "steering_policy", value)
@property
@pulumi.getter
def ttl(self) -> Optional[pulumi.Input[int]]:
"""
See field above.
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ttl", value)
@property
@pulumi.getter(name="zoneId")
def zone_id(self) -> Optional[pulumi.Input[str]]:
"""
The zone ID to add the load balancer to.
"""
return pulumi.get(self, "zone_id")
@zone_id.setter
def zone_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone_id", value)
class LoadBalancer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
default_pool_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
fallback_pool_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pop_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerPopPoolArgs']]]]] = None,
proxied: Optional[pulumi.Input[bool]] = None,
region_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerRegionPoolArgs']]]]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerRuleArgs']]]]] = None,
session_affinity: Optional[pulumi.Input[str]] = None,
session_affinity_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
session_affinity_ttl: Optional[pulumi.Input[int]] = None,
steering_policy: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[int]] = None,
zone_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Cloudflare Load Balancer resource. This sits in front of a number of defined pools of origins and provides various options for geographically-aware load balancing. Note that the load balancing feature must be enabled in your Cloudflare account before you can use this resource.
## Example Usage
```python
import pulumi
import pulumi_cloudflare as cloudflare
foo = cloudflare.LoadBalancerPool("foo",
name="example-lb-pool",
origins=[cloudflare.LoadBalancerPoolOriginArgs(
name="example-1",
address="192.0.2.1",
enabled=False,
)])
# Define a load balancer which always points to a pool we define below
# In normal usage, would have different pools set for different pops (cloudflare points-of-presence) and/or for different regions
# Within each pop or region we can define multiple pools in failover order
bar = cloudflare.LoadBalancer("bar",
zone_id="d41d8cd98f00b204e9800998ecf8427e",
name="example-load-balancer.example.com",
fallback_pool_id=foo.id,
default_pool_ids=[foo.id],
description="example load balancer using geo-balancing",
proxied=True,
steering_policy="geo",
pop_pools=[cloudflare.LoadBalancerPopPoolArgs(
pop="LAX",
pool_ids=[foo.id],
)],
region_pools=[cloudflare.LoadBalancerRegionPoolArgs(
region="WNAM",
pool_ids=[foo.id],
)],
rules=[cloudflare.LoadBalancerRuleArgs(
name="example rule",
condition="http.request.uri.path contains \"testing\"",
fixed_response=cloudflare.LoadBalancerRuleFixedResponseArgs(
message_body="hello",
status_code=200,
content_type="html",
location="www.example.com",
),
)])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] default_pool_ids: A list of pool IDs ordered by their failover priority. Used whenever region/pop pools are not defined.
:param pulumi.Input[str] description: Free text description.
:param pulumi.Input[bool] enabled: Enable or disable the load balancer. Defaults to `true` (enabled).
:param pulumi.Input[str] fallback_pool_id: The pool ID to use when all other pools are detected as unhealthy.
:param pulumi.Input[str] name: Human readable name for this rule.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerPopPoolArgs']]]] pop_pools: See pop_pools above.
:param pulumi.Input[bool] proxied: Whether the hostname gets Cloudflare's origin protection. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerRegionPoolArgs']]]] region_pools: See region_pools above.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerRuleArgs']]]] rules: A list of conditions and overrides for each load balancer operation. See the field documentation below.
:param pulumi.Input[str] session_affinity: See field above.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] session_affinity_attributes: See field above.
:param pulumi.Input[int] session_affinity_ttl: See field above.
:param pulumi.Input[str] steering_policy: See field above.
:param pulumi.Input[int] ttl: See field above.
:param pulumi.Input[str] zone_id: The zone ID to add the load balancer to.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LoadBalancerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Cloudflare Load Balancer resource. This sits in front of a number of defined pools of origins and provides various options for geographically-aware load balancing. Note that the load balancing feature must be enabled in your Cloudflare account before you can use this resource.
## Example Usage
```python
import pulumi
import pulumi_cloudflare as cloudflare
foo = cloudflare.LoadBalancerPool("foo",
name="example-lb-pool",
origins=[cloudflare.LoadBalancerPoolOriginArgs(
name="example-1",
address="192.0.2.1",
enabled=False,
)])
# Define a load balancer which always points to a pool we define below
# In normal usage, would have different pools set for different pops (cloudflare points-of-presence) and/or for different regions
# Within each pop or region we can define multiple pools in failover order
bar = cloudflare.LoadBalancer("bar",
zone_id="d41d8cd98f00b204e9800998ecf8427e",
name="example-load-balancer.example.com",
fallback_pool_id=foo.id,
default_pool_ids=[foo.id],
description="example load balancer using geo-balancing",
proxied=True,
steering_policy="geo",
pop_pools=[cloudflare.LoadBalancerPopPoolArgs(
pop="LAX",
pool_ids=[foo.id],
)],
region_pools=[cloudflare.LoadBalancerRegionPoolArgs(
region="WNAM",
pool_ids=[foo.id],
)],
rules=[cloudflare.LoadBalancerRuleArgs(
name="example rule",
condition="http.request.uri.path contains \"testing\"",
fixed_response=cloudflare.LoadBalancerRuleFixedResponseArgs(
message_body="hello",
status_code=200,
content_type="html",
location="www.example.com",
),
)])
```
:param str resource_name: The name of the resource.
:param LoadBalancerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LoadBalancerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
default_pool_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
fallback_pool_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pop_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerPopPoolArgs']]]]] = None,
proxied: Optional[pulumi.Input[bool]] = None,
region_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerRegionPoolArgs']]]]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerRuleArgs']]]]] = None,
session_affinity: Optional[pulumi.Input[str]] = None,
session_affinity_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
session_affinity_ttl: Optional[pulumi.Input[int]] = None,
steering_policy: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[int]] = None,
zone_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LoadBalancerArgs.__new__(LoadBalancerArgs)
if default_pool_ids is None and not opts.urn:
raise TypeError("Missing required property 'default_pool_ids'")
__props__.__dict__["default_pool_ids"] = default_pool_ids
__props__.__dict__["description"] = description
__props__.__dict__["enabled"] = enabled
if fallback_pool_id is None and not opts.urn:
raise TypeError("Missing required property 'fallback_pool_id'")
__props__.__dict__["fallback_pool_id"] = fallback_pool_id
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
__props__.__dict__["pop_pools"] = pop_pools
__props__.__dict__["proxied"] = proxied
__props__.__dict__["region_pools"] = region_pools
__props__.__dict__["rules"] = rules
__props__.__dict__["session_affinity"] = session_affinity
__props__.__dict__["session_affinity_attributes"] = session_affinity_attributes
__props__.__dict__["session_affinity_ttl"] = session_affinity_ttl
__props__.__dict__["steering_policy"] = steering_policy
__props__.__dict__["ttl"] = ttl
if zone_id is None and not opts.urn:
raise TypeError("Missing required property 'zone_id'")
__props__.__dict__["zone_id"] = zone_id
__props__.__dict__["created_on"] = None
__props__.__dict__["modified_on"] = None
super(LoadBalancer, __self__).__init__(
'cloudflare:index/loadBalancer:LoadBalancer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
created_on: Optional[pulumi.Input[str]] = None,
default_pool_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
fallback_pool_id: Optional[pulumi.Input[str]] = None,
modified_on: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pop_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerPopPoolArgs']]]]] = None,
proxied: Optional[pulumi.Input[bool]] = None,
region_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerRegionPoolArgs']]]]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerRuleArgs']]]]] = None,
session_affinity: Optional[pulumi.Input[str]] = None,
session_affinity_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
session_affinity_ttl: Optional[pulumi.Input[int]] = None,
steering_policy: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[int]] = None,
zone_id: Optional[pulumi.Input[str]] = None) -> 'LoadBalancer':
"""
Get an existing LoadBalancer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] created_on: The RFC3339 timestamp of when the load balancer was created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] default_pool_ids: A list of pool IDs ordered by their failover priority. Used whenever region/pop pools are not defined.
:param pulumi.Input[str] description: Free text description.
:param pulumi.Input[bool] enabled: Enable or disable the load balancer. Defaults to `true` (enabled).
:param pulumi.Input[str] fallback_pool_id: The pool ID to use when all other pools are detected as unhealthy.
:param pulumi.Input[str] modified_on: The RFC3339 timestamp of when the load balancer was last modified.
:param pulumi.Input[str] name: Human readable name for this rule.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerPopPoolArgs']]]] pop_pools: See pop_pools above.
:param pulumi.Input[bool] proxied: Whether the hostname gets Cloudflare's origin protection. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerRegionPoolArgs']]]] region_pools: See region_pools above.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerRuleArgs']]]] rules: A list of conditions and overrides for each load balancer operation. See the field documentation below.
:param pulumi.Input[str] session_affinity: See field above.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] session_affinity_attributes: See field above.
:param pulumi.Input[int] session_affinity_ttl: See field above.
:param pulumi.Input[str] steering_policy: See field above.
:param pulumi.Input[int] ttl: See field above.
:param pulumi.Input[str] zone_id: The zone ID to add the load balancer to.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LoadBalancerState.__new__(_LoadBalancerState)
__props__.__dict__["created_on"] = created_on
__props__.__dict__["default_pool_ids"] = default_pool_ids
__props__.__dict__["description"] = description
__props__.__dict__["enabled"] = enabled
__props__.__dict__["fallback_pool_id"] = fallback_pool_id
__props__.__dict__["modified_on"] = modified_on
__props__.__dict__["name"] = name
__props__.__dict__["pop_pools"] = pop_pools
__props__.__dict__["proxied"] = proxied
__props__.__dict__["region_pools"] = region_pools
__props__.__dict__["rules"] = rules
__props__.__dict__["session_affinity"] = session_affinity
__props__.__dict__["session_affinity_attributes"] = session_affinity_attributes
__props__.__dict__["session_affinity_ttl"] = session_affinity_ttl
__props__.__dict__["steering_policy"] = steering_policy
__props__.__dict__["ttl"] = ttl
__props__.__dict__["zone_id"] = zone_id
return LoadBalancer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdOn")
def created_on(self) -> pulumi.Output[str]:
"""
The RFC3339 timestamp of when the load balancer was created.
"""
return pulumi.get(self, "created_on")
@property
@pulumi.getter(name="defaultPoolIds")
def default_pool_ids(self) -> pulumi.Output[Sequence[str]]:
"""
A list of pool IDs ordered by their failover priority. Used whenever region/pop pools are not defined.
"""
return pulumi.get(self, "default_pool_ids")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Free text description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Enable or disable the load balancer. Defaults to `true` (enabled).
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="fallbackPoolId")
def fallback_pool_id(self) -> pulumi.Output[str]:
"""
The pool ID to use when all other pools are detected as unhealthy.
"""
return pulumi.get(self, "fallback_pool_id")
@property
@pulumi.getter(name="modifiedOn")
def modified_on(self) -> pulumi.Output[str]:
"""
The RFC3339 timestamp of when the load balancer was last modified.
"""
return pulumi.get(self, "modified_on")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Human readable name for this rule.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="popPools")
def pop_pools(self) -> pulumi.Output[Sequence['outputs.LoadBalancerPopPool']]:
"""
See pop_pools above.
"""
return pulumi.get(self, "pop_pools")
@property
@pulumi.getter
def proxied(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the hostname gets Cloudflare's origin protection. Defaults to `false`.
"""
return pulumi.get(self, "proxied")
@property
@pulumi.getter(name="regionPools")
def region_pools(self) -> pulumi.Output[Sequence['outputs.LoadBalancerRegionPool']]:
"""
See region_pools above.
"""
return pulumi.get(self, "region_pools")
@property
@pulumi.getter
def rules(self) -> pulumi.Output[Optional[Sequence['outputs.LoadBalancerRule']]]:
"""
A list of conditions and overrides for each load balancer operation. See the field documentation below.
"""
return pulumi.get(self, "rules")
@property
@pulumi.getter(name="sessionAffinity")
def session_affinity(self) -> pulumi.Output[Optional[str]]:
"""
See field above.
"""
return pulumi.get(self, "session_affinity")
@property
@pulumi.getter(name="sessionAffinityAttributes")
def session_affinity_attributes(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
See field above.
"""
return pulumi.get(self, "session_affinity_attributes")
@property
@pulumi.getter(name="sessionAffinityTtl")
def session_affinity_ttl(self) -> pulumi.Output[Optional[int]]:
"""
See field above.
"""
return pulumi.get(self, "session_affinity_ttl")
@property
@pulumi.getter(name="steeringPolicy")
def steering_policy(self) -> pulumi.Output[str]:
"""
See field above.
"""
return pulumi.get(self, "steering_policy")
@property
@pulumi.getter
def ttl(self) -> pulumi.Output[int]:
"""
See field above.
"""
return pulumi.get(self, "ttl")
@property
@pulumi.getter(name="zoneId")
def zone_id(self) -> pulumi.Output[str]:
"""
The zone ID to add the load balancer to.
"""
return pulumi.get(self, "zone_id")
| 44.722803
| 296
| 0.645843
| 4,889
| 42,755
| 5.436899
| 0.057271
| 0.114217
| 0.093638
| 0.043866
| 0.924307
| 0.90828
| 0.893006
| 0.880328
| 0.875324
| 0.849667
| 0
| 0.002941
| 0.244416
| 42,755
| 955
| 297
| 44.769634
| 0.819873
| 0.300503
| 0
| 0.795113
| 1
| 0
| 0.117081
| 0.03217
| 0
| 0
| 0
| 0
| 0
| 1
| 0.165414
| false
| 0.00188
| 0.013158
| 0
| 0.278195
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f0e24ed522a46e732fa6385c15b7b772198463a6
| 180
|
py
|
Python
|
tests/context.py
|
witekbobrowski/humblecritic
|
08ff00e4e233c251453f20bac2593be70235fac8
|
[
"MIT"
] | 3
|
2018-02-24T12:23:21.000Z
|
2018-10-15T11:18:17.000Z
|
tests/context.py
|
witekbobrowski/humblecritic
|
08ff00e4e233c251453f20bac2593be70235fac8
|
[
"MIT"
] | 1
|
2018-02-24T20:24:57.000Z
|
2018-02-24T21:35:58.000Z
|
tests/context.py
|
witekbobrowski/humblecritic
|
08ff00e4e233c251453f20bac2593be70235fac8
|
[
"MIT"
] | 1
|
2018-02-27T16:07:28.000Z
|
2018-02-27T16:07:28.000Z
|
#!/usr/bin/env python3
# -*- coding : utf-8 -*-
# Author: Witek Bobrowski
from humblecritic import goodreads
from humblecritic import humblebundle
from humblecritic import config
| 22.5
| 37
| 0.772222
| 22
| 180
| 6.318182
| 0.727273
| 0.345324
| 0.47482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012903
| 0.138889
| 180
| 7
| 38
| 25.714286
| 0.883871
| 0.377778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f0ec7e98aa2f5fb11af5827b7dd14b4c3fb08d8d
| 87
|
py
|
Python
|
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/importStatementsData/in_9_several_absolute_imports.py
|
JetBrains-Research/Lupa
|
c105487621564c60cae17395bf32eb40868ceb89
|
[
"Apache-2.0"
] | 16
|
2022-01-11T00:32:20.000Z
|
2022-03-25T21:40:52.000Z
|
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/importStatementsData/in_9_several_absolute_imports.py
|
nbirillo/Kotlin-Analysis
|
73c3b8a59bf40ed932bb512f30b0ff31f251af40
|
[
"Apache-2.0"
] | 12
|
2021-07-05T11:42:01.000Z
|
2021-12-23T07:57:54.000Z
|
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/importStatementsData/in_9_several_absolute_imports.py
|
nbirillo/Kotlin-Analysis
|
73c3b8a59bf40ed932bb512f30b0ff31f251af40
|
[
"Apache-2.0"
] | 3
|
2021-09-10T13:21:54.000Z
|
2021-11-23T11:37:55.000Z
|
import src.tasks.task1.utils
import src.tasks.task2.utils
import src.tasks.task3.utils
| 21.75
| 28
| 0.827586
| 15
| 87
| 4.8
| 0.466667
| 0.375
| 0.583333
| 0.527778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.068966
| 87
| 3
| 29
| 29
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
acb1099e9221683d2e01ff053bcb83b5f0b8869f
| 10,286
|
py
|
Python
|
services/core-api/tests/mines/mine/resources/test_mine_incident_resource.py
|
bcgov/mds
|
6c427a66a5edb4196222607291adef8fd6677038
|
[
"Apache-2.0"
] | 25
|
2018-07-09T19:04:37.000Z
|
2022-03-15T17:27:10.000Z
|
services/core-api/tests/mines/mine/resources/test_mine_incident_resource.py
|
areyeslo/mds
|
e8c38e593e09b78e2a57009c0d003d6c4bfa32e6
|
[
"Apache-2.0"
] | 983
|
2018-04-25T20:08:07.000Z
|
2022-03-31T21:45:20.000Z
|
services/core-api/tests/mines/mine/resources/test_mine_incident_resource.py
|
areyeslo/mds
|
e8c38e593e09b78e2a57009c0d003d6c4bfa32e6
|
[
"Apache-2.0"
] | 58
|
2018-05-15T22:35:50.000Z
|
2021-11-29T19:40:52.000Z
|
import pytest
import json
from datetime import datetime, timedelta
from app.extensions import db
from app.api.incidents.models.mine_incident import MineIncident
from tests.factories import MineFactory
from tests.status_code_gen import SampleDangerousOccurrenceSubparagraphs, RandomIncidentDeterminationTypeCode
# GET
def test_get_mine_incidents_by_mine_guid(test_client, db_session, auth_headers):
test_mine_guid = MineFactory().mine_guid
get_resp = test_client.get(
f'/mines/{test_mine_guid}/incidents', headers=auth_headers['full_auth_header'])
get_data = json.loads(get_resp.data.decode())
assert get_resp.status_code == 200
assert len(get_data['records']) > 0
assert all(i['mine_guid'] == str(test_mine_guid) for i in get_data['records'])
def test_get_mine_incidents_by_guid(test_client, db_session, auth_headers):
test_mine = MineFactory()
test_guid = test_mine.mine_incidents[0].mine_incident_guid
get_resp = test_client.get(
f'/mines/{test_mine.mine_guid}/incidents/{test_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200
get_data = json.loads(get_resp.data.decode())
assert get_data['mine_incident_guid'] == str(test_guid)
# POST
def test_post_mine_incidents_happy(test_client, db_session, auth_headers):
test_mine_guid = MineFactory().mine_guid
now_time_string = datetime.now().strftime("%Y-%m-%d %H:%M")
data = {
'determination_type_code': 'NDO',
'incident_timestamp': now_time_string,
'reported_timestamp': now_time_string,
'incident_description': "Someone got a paper cut",
}
post_resp = test_client.post(
f'/mines/{test_mine_guid}/incidents', json=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 201, post_resp.response
post_data = json.loads(post_resp.data.decode())
assert post_data['mine_guid'] == str(test_mine_guid)
assert post_data['determination_type_code'] == data['determination_type_code']
assert post_data['incident_timestamp'] == now_time_string
# datetime.fromisoformat is in python 3.7
# assert datetime.fromisoformat(post_data['incident_timestamp']) == datetime.strptime(
# data['incident_timestamp'], '%Y-%m-%d %H:%M')
assert post_data['incident_description'] == data['incident_description']
def test_post_mine_incidents_including_optional_fields(test_client, db_session, auth_headers):
test_mine_guid = MineFactory().mine_guid
now_time_string = datetime.now().strftime("%Y-%m-%d %H:%M")
data = {
'determination_type_code': 'NDO',
'incident_timestamp': now_time_string,
'reported_timestamp': now_time_string,
'incident_description': 'Someone got a paper cut',
'mine_determination_type_code': 'NDO',
'mine_determination_representative': 'Billy'
}
post_resp = test_client.post(
f'/mines/{test_mine_guid}/incidents', json=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 201, post_resp.response
post_data = json.loads(post_resp.data.decode())
assert post_data['mine_guid'] == str(test_mine_guid)
assert post_data['determination_type_code'] == data['determination_type_code']
assert post_data['incident_timestamp'] == now_time_string
assert post_data['incident_description'] == data['incident_description']
assert post_data['mine_determination_type_code'] == data['mine_determination_type_code']
assert post_data['mine_determination_representative'] == data[
'mine_determination_representative']
def test_post_mine_incidents_dangerous_occurrence_happy(test_client, db_session, auth_headers):
test_mine_guid = MineFactory().mine_guid
do_subparagraph_count = 2
do_ids = [
sub.compliance_article_id
for sub in SampleDangerousOccurrenceSubparagraphs(do_subparagraph_count)
]
now_time_string = datetime.now().strftime("%Y-%m-%d %H:%M")
data = {
'determination_type_code': 'DO',
'incident_timestamp': now_time_string,
'reported_timestamp': now_time_string,
'incident_description': "Someone got a really bad paper cut",
'dangerous_occurrence_subparagraph_ids': do_ids
}
post_resp = test_client.post(
f'/mines/{test_mine_guid}/incidents', json=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 201, post_resp.response
post_data = json.loads(post_resp.data.decode())
assert post_data['mine_guid'] == str(test_mine_guid)
assert post_data['determination_type_code'] == data['determination_type_code']
assert post_data['incident_timestamp'] == now_time_string
assert post_data['incident_description'] == data['incident_description']
assert set(post_data['dangerous_occurrence_subparagraph_ids']) == set(
data['dangerous_occurrence_subparagraph_ids'])
def test_post_mine_incidents_dangerous_occurrence_no_subs(test_client, db_session, auth_headers):
test_mine_guid = MineFactory().mine_guid
now_time_string = datetime.now().strftime("%Y-%m-%d %H:%M")
data = {
'determination_type_code': 'DO',
'incident_timestamp': now_time_string,
'incident_description': "Someone got a really bad paper cut",
'dangerous_occurrence_subparagraph_ids': []
}
post_resp = test_client.post(
f'/mines/{test_mine_guid}/incidents', json=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 400
# PUT
def test_put_mine_incidents_happy(test_client, db_session, auth_headers):
test_mine = MineFactory()
test_guid = test_mine.mine_incidents[0].mine_incident_guid
new_time_string = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d %H:%M")
data = {
'determination_type_code': 'NDO',
'incident_timestamp': new_time_string,
'incident_description': "Someone got a second paper cut",
}
put_resp = test_client.put(
f'/mines/{test_mine.mine_guid}/incidents/{test_guid}',
json=data,
headers=auth_headers['full_auth_header'])
assert put_resp.status_code == 200, put_resp.response
put_data = json.loads(put_resp.data.decode())
assert put_data['determination_type_code'] == data['determination_type_code']
assert put_data['incident_timestamp'] == new_time_string
assert put_data['incident_description'] == data['incident_description']
def test_put_mine_incidents_including_optional_fields(test_client, db_session, auth_headers):
test_mine = MineFactory()
test_guid = test_mine.mine_incidents[0].mine_incident_guid
new_time_string = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d %H:%M")
data = {
'determination_type_code': 'NDO',
'incident_timestamp': new_time_string,
'reported_timestamp': new_time_string,
'incident_description': 'Someone got a paper cut',
'mine_determination_type_code': 'NDO',
'mine_determination_representative': 'Billy'
}
put_resp = test_client.put(
f'/mines/{test_mine.mine_guid}/incidents/{test_guid}',
json=data,
headers=auth_headers['full_auth_header'])
assert put_resp.status_code == 200, put_resp.response
put_data = json.loads(put_resp.data.decode())
assert put_data['determination_type_code'] == data['determination_type_code']
assert put_data['incident_timestamp'] == new_time_string
assert put_data['incident_description'] == data['incident_description']
assert put_data['mine_determination_type_code'] == data['mine_determination_type_code']
assert put_data['mine_determination_representative'] == data[
'mine_determination_representative']
def test_put_mine_incidents_dangerous_occurrence_happy(test_client, db_session, auth_headers):
test_mine = MineFactory()
existing_incident_guid = test_mine.mine_incidents[0].mine_incident_guid
do_subparagraph_count = 2
do_ids = [
sub.compliance_article_id
for sub in SampleDangerousOccurrenceSubparagraphs(do_subparagraph_count)
]
new_time_string = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d %H:%M")
data = {
'determination_type_code': 'DO',
'incident_timestamp': new_time_string,
'incident_description': "Someone got a really bad paper cut",
'dangerous_occurrence_subparagraph_ids': do_ids
}
put_resp = test_client.put(
f'/mines/{test_mine.mine_guid}/incidents/{existing_incident_guid}',
json=data,
headers=auth_headers['full_auth_header'])
assert put_resp.status_code == 200, put_resp.response
put_data = json.loads(put_resp.data.decode())
assert put_data['determination_type_code'] == data['determination_type_code']
assert put_data['incident_timestamp'] == new_time_string
assert put_data['incident_description'] == data['incident_description']
assert set(put_data['dangerous_occurrence_subparagraph_ids']) == set(
data['dangerous_occurrence_subparagraph_ids'])
def test_put_mine_incidents_dangerous_occurrence_no_subs(test_client, db_session, auth_headers):
test_mine = MineFactory()
existing_incident_guid = test_mine.mine_incidents[0].mine_incident_guid
new_time_string = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d %H:%M")
data = {
'determination_type_code': 'DO',
'incident_timestamp': new_time_string,
'incident_description': "Someone got a really bad paper cut",
'dangerous_occurrence_subparagraph_ids': []
}
put_resp = test_client.put(
f'/mines/{test_mine.mine_guid}/incidents/{existing_incident_guid}',
json=data,
headers=auth_headers['full_auth_header'])
assert put_resp.status_code == 400, put_resp.response
# DELETE
def test_delete_mine_incident(test_client, db_session, auth_headers):
test_mine = MineFactory()
test_mine_incident = test_mine.mine_incidents[0]
test_mine_incident_guid = test_mine_incident.mine_incident_guid
delete_resp = test_client.delete(
f'/mines/{test_mine.mine_guid}/incidents/{test_mine_incident_guid}',
headers=auth_headers['full_auth_header'])
assert delete_resp.status_code == 204
| 41.643725
| 109
| 0.727299
| 1,328
| 10,286
| 5.230422
| 0.082078
| 0.041463
| 0.078606
| 0.071984
| 0.90095
| 0.890297
| 0.866974
| 0.864958
| 0.83746
| 0.830982
| 0
| 0.005552
| 0.159537
| 10,286
| 247
| 110
| 41.643725
| 0.797918
| 0.018763
| 0
| 0.748691
| 0
| 0
| 0.295261
| 0.161313
| 0
| 0
| 0
| 0
| 0.21466
| 1
| 0.057592
| false
| 0
| 0.036649
| 0
| 0.094241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
acb5c7063b333e77f3910ed64be666e532ef5aba
| 68,610
|
py
|
Python
|
benchmarks/SimResults/combinations_spec_ml_fulltrained/cmp_povraygromacslibquantumbzip2/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_ml_fulltrained/cmp_povraygromacslibquantumbzip2/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_ml_fulltrained/cmp_povraygromacslibquantumbzip2/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.279948,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.422573,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.60725,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.716903,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.24142,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.711987,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.67031,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.462214,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 8.62736,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.303644,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0259883,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.288855,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.192199,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.592499,
'Execution Unit/Register Files/Runtime Dynamic': 0.218188,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.774688,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.75166,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.4681,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0014788,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0014788,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00129192,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000502249,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00276096,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00701047,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0140397,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.184766,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.403603,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.627549,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.23697,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0781615,
'L2/Runtime Dynamic': 0.0159814,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 6.57514,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.5708,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.172698,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.172698,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.39398,
'Load Store Unit/Runtime Dynamic': 3.59519,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.425843,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.851687,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.151133,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.15224,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0663621,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.819774,
'Memory Management Unit/Runtime Dynamic': 0.218602,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 30.4497,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 1.05935,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0494058,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.353949,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 1.4627,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 11.9975,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0353238,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.230434,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.188007,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.256262,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.413341,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.20864,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.878243,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.264266,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.67293,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0355186,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0107488,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0910639,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0794937,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.126582,
'Execution Unit/Register Files/Runtime Dynamic': 0.0902425,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.200679,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.546606,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.1509,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00188265,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00188265,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00167191,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000664796,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00114193,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00657914,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0169028,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0764194,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.86092,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.234489,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.259555,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.31535,
'Instruction Fetch Unit/Runtime Dynamic': 0.593945,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0556922,
'L2/Runtime Dynamic': 0.0135868,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.98297,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.33402,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0888347,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0888347,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.40246,
'Load Store Unit/Runtime Dynamic': 1.86096,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.219051,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.438102,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.077742,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0784655,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.302234,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0387752,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.59189,
'Memory Management Unit/Runtime Dynamic': 0.117241,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 20.6278,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0934335,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0126989,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.130391,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.236524,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.97316,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.027942,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.224635,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.151663,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.20755,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.334771,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.168981,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.711303,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.214125,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.5087,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0286524,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0087056,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0733822,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0643832,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.102035,
'Execution Unit/Register Files/Runtime Dynamic': 0.0730888,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.161583,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.414446,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.82885,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00184115,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00184115,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0016431,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000657648,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00092487,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00625027,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0162432,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0618933,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.93694,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.217665,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.210217,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.34653,
'Instruction Fetch Unit/Runtime Dynamic': 0.51227,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0632193,
'L2/Runtime Dynamic': 0.015585,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.20236,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.96046,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0635801,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0635802,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.50259,
'Load Store Unit/Runtime Dynamic': 1.3376,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.156778,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.313556,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0556409,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0564981,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.244785,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0359566,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.496475,
'Memory Management Unit/Runtime Dynamic': 0.0924547,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 18.507,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0753717,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0102814,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.104949,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.190602,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.97736,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.00147844,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.20385,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.00846297,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0650334,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.104896,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0529482,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.222878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.073081,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 3.98036,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00159884,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00272779,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0202592,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0201737,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.021858,
'Execution Unit/Register Files/Runtime Dynamic': 0.0229015,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0430501,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.119009,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.974015,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000634581,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000634581,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000558074,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000218968,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000289797,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00211703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00589298,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0193935,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.23359,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0857085,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.065869,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.51198,
'Instruction Fetch Unit/Runtime Dynamic': 0.178981,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0301897,
'L2/Runtime Dynamic': 0.0091083,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.58082,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.17946,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0111196,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0111196,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.63333,
'Load Store Unit/Runtime Dynamic': 0.245418,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0274191,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0548382,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.00973112,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0101837,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0767002,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0140533,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.249525,
'Memory Management Unit/Runtime Dynamic': 0.0242369,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 12.9948,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.00420602,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00298531,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0333926,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0405839,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.47234,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 1.9032713918779498,
'Runtime Dynamic': 1.9032713918779498,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.163028,
'Runtime Dynamic': 0.0807934,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 82.7424,
'Peak Power': 115.855,
'Runtime Dynamic': 22.5012,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 82.5793,
'Total Cores/Runtime Dynamic': 22.4204,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.163028,
'Total L3s/Runtime Dynamic': 0.0807934,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.065646
| 124
| 0.682087
| 8,082
| 68,610
| 5.784459
| 0.067805
| 0.123551
| 0.112941
| 0.093433
| 0.938396
| 0.929797
| 0.917754
| 0.88492
| 0.860278
| 0.841968
| 0
| 0.131964
| 0.224326
| 68,610
| 914
| 125
| 75.065646
| 0.746482
| 0
| 0
| 0.642232
| 0
| 0
| 0.657402
| 0.048097
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a184d557cb36dd8d6cbf1416449a0b2120b7544
| 13,939
|
py
|
Python
|
A3C/network.py
|
Xin-Ye-1/HIEM
|
6764f579eef6ec92dd85a005af27419f630df7da
|
[
"Apache-2.0"
] | 2
|
2021-04-12T02:41:00.000Z
|
2021-05-15T02:18:15.000Z
|
A3C/network.py
|
Xin-Ye-1/HIEM
|
6764f579eef6ec92dd85a005af27419f630df7da
|
[
"Apache-2.0"
] | null | null | null |
A3C/network.py
|
Xin-Ye-1/HIEM
|
6764f579eef6ec92dd85a005af27419f630df7da
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
import tensorflow as tf
import tensorflow.contrib.slim as slim
seed = 0
class Lowlevel_Network():
def __init__(self,
window_size,
num_labels,
action_size,
history_steps,
scope='global'
):
with tf.variable_scope(scope):
self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
dtype=tf.float32)
self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1], dtype=tf.float32)
self.targets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
targets_expanded = tf.tile(tf.expand_dims(self.targets, 1),
[1, history_steps * window_size * window_size, 1])
masked_visions = tf.reduce_sum(self.visions * targets_expanded, axis=-1)
masked_visions = slim.flatten(masked_visions)
depths = slim.flatten(self.depths)
hidden_visions = slim.fully_connected(inputs=masked_visions,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='vision_hidden')
hidden_depths = slim.fully_connected(inputs=depths,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='depth_hidden')
vision_depth_feature = tf.concat([hidden_visions, hidden_depths], 1)
embed_feature = slim.fully_connected(inputs=vision_depth_feature,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='embed')
# policy estimation
hidden_policy = slim.fully_connected(inputs=embed_feature,
num_outputs=20,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='policy_hidden')
self.policy = slim.fully_connected(inputs=hidden_policy,
num_outputs=action_size,
activation_fn=tf.nn.softmax,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='policy')
# value estimation
hidden_value = slim.fully_connected(inputs=embed_feature,
num_outputs=20,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='value_hidden')
self.value = slim.fully_connected(inputs=hidden_value,
num_outputs=1,
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='value')
# Lowlevel training
self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.target_values = tf.placeholder(shape=[None], dtype=tf.float32)
self.lowlevel_lr = tf.placeholder(dtype=tf.float32)
self.er = tf.placeholder(dtype=tf.float32)
actions_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
log_policy = tf.log(tf.clip_by_value(self.policy, 0.000001, 0.999999))
log_pi_for_action = tf.reduce_sum(tf.multiply(log_policy, actions_onehot), axis=1)
self.value_loss = 0.5 * tf.reduce_mean(tf.square(self.target_values - self.value))
self.policy_loss = -tf.reduce_mean(log_pi_for_action * self.advantages)
self.entropy_loss = -tf.reduce_mean(tf.reduce_sum(self.policy * (-log_policy), axis=1))
self.lowlevel_loss = self.value_loss + self.policy_loss + self.er * self.entropy_loss
local_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
gradients = tf.gradients(self.lowlevel_loss, local_lowlevel_params)
norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
lowlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.lowlevel_lr)
global_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
self.lowlevel_update = lowlevel_trainer.apply_gradients(zip(norm_gradients, global_lowlevel_params))
def fc2d(inputs,
num_outputs,
activation_fn,
scope):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as s:
n0, n1, n2 = inputs.get_shape().as_list()
weights = tf.get_variable(name='weights',
shape=[n2, num_outputs],
initializer=tf.contrib.layers.xavier_initializer(seed=seed),
trainable=True)
wx = tf.einsum('ijk,kl->ijl', inputs, weights)
biases = tf.get_variable(name='biases',
shape=[num_outputs],
initializer=tf.zeros_initializer(),
trainable=True)
wx_b = wx + biases
result = wx_b if activation_fn is None else activation_fn(wx_b, name=s.name)
return result
class Lowlevel_Network_full():
def __init__(self,
window_size,
num_labels,
action_size,
history_steps,
scope='global'
):
with tf.variable_scope(scope):
self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
dtype=tf.float32)
self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1],
dtype=tf.float32)
self.targets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
related_visions = fc2d(inputs=self.visions,
num_outputs=1,
activation_fn=None,
scope='vision_preprocess')
related_visions = slim.flatten(related_visions)
depths = slim.flatten(self.depths)
hidden_visions = slim.fully_connected(inputs=related_visions,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(
seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='vision_hidden')
hidden_depths = slim.fully_connected(inputs=depths,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(
seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='depth_hidden')
hidden_targets = slim.fully_connected(inputs=self.targets,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(
seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='target_hidden')
vision_depth_feature = tf.concat([hidden_visions, hidden_depths, hidden_targets], -1)
embed_feature = slim.fully_connected(inputs=vision_depth_feature,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='embed')
# policy estimation
hidden_policy = slim.fully_connected(inputs=embed_feature,
num_outputs=20,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='policy_hidden')
self.policy = slim.fully_connected(inputs=hidden_policy,
num_outputs=action_size,
activation_fn=tf.nn.softmax,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='policy')
# value estimation
hidden_value = slim.fully_connected(inputs=embed_feature,
num_outputs=20,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='value_hidden')
self.value = slim.fully_connected(inputs=hidden_value,
num_outputs=1,
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='value')
# Lowlevel training
if not scope.startswith('global'):
self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.target_values = tf.placeholder(shape=[None], dtype=tf.float32)
self.lowlevel_lr = tf.placeholder(dtype=tf.float32)
self.er = tf.placeholder(dtype=tf.float32)
actions_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
log_policy = tf.log(tf.clip_by_value(self.policy, 0.000001, 0.999999))
log_pi_for_action = tf.reduce_sum(tf.multiply(log_policy, actions_onehot), axis=1)
self.value_loss = 0.5 * tf.reduce_mean(tf.square(self.target_values - self.value))
self.policy_loss = -tf.reduce_mean(log_pi_for_action * self.advantages)
self.entropy_loss = -tf.reduce_mean(tf.reduce_sum(self.policy * (-log_policy), axis=1))
self.lowlevel_loss = self.value_loss + self.policy_loss + self.er * self.entropy_loss
local_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
gradients = tf.gradients(self.lowlevel_loss, local_lowlevel_params)
norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
lowlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.lowlevel_lr)
global_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
self.lowlevel_update = lowlevel_trainer.apply_gradients(zip(norm_gradients, global_lowlevel_params))
| 52.402256
| 118
| 0.493794
| 1,229
| 13,939
| 5.328723
| 0.115541
| 0.063521
| 0.034204
| 0.063521
| 0.880134
| 0.87647
| 0.872347
| 0.867308
| 0.857077
| 0.852191
| 0
| 0.015767
| 0.431236
| 13,939
| 265
| 119
| 52.6
| 0.810293
| 0.009111
| 0
| 0.806283
| 0
| 0
| 0.015665
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015707
| false
| 0
| 0.010471
| 0
| 0.041885
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a889da6d0073ae3a89671756544dd80298391fcf
| 39,493
|
py
|
Python
|
testing/test_transatomic.py
|
KWR-Water/greta
|
ddeb23ac25b5d6efb1a00a99e6671a63eb654c22
|
[
"MIT"
] | null | null | null |
testing/test_transatomic.py
|
KWR-Water/greta
|
ddeb23ac25b5d6efb1a00a99e6671a63eb654c22
|
[
"MIT"
] | null | null | null |
testing/test_transatomic.py
|
KWR-Water/greta
|
ddeb23ac25b5d6efb1a00a99e6671a63eb654c22
|
[
"MIT"
] | null | null | null |
#%%
import pytest
from pandas import read_csv
import pandas as pd
import os
# path = os.getcwd() # path of working directory
from pathlib import Path
# try:
# from project_path import module_path #the dot says looik in the current folder, this project_path.py file must be in the folder here
# except ModuleNotFoundError:
# from project_path import module_path
from greta.Analytical_Well import *
from greta.Substance_Transport import *
from pandas.testing import assert_frame_equal
import warnings
# get directory of this file
path = Path(__file__).parent #os.getcwd() #path of working directory
#%%
def test_travel_time_distribution_phreatic():
""" Compares the calculated travel times (total, unsaturated zone, shallow aquifer and target aquifer)
against a known case from TRANSATOMIC excel """
output_phreatic = pd.read_csv(path / 'phreatic_test.csv')
output_phreatic = output_phreatic.round(7) #round to 7 digits (or any digit), keep same as for the output for the model to compare
test_ = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp', # @alex: what_to_export sounds very cryptic and ad-hoc. maybe we can think of something better
well_discharge=-319.4*24,
# vertical_resistance_shallow_aquifer=500,
hor_permeability_shallow_aquifer = 0.02,
porosity_vadose_zone=0.38,
porosity_shallow_aquifer=0.35,
porosity_target_aquifer=0.35,
recharge_rate=0.3/365.25,
moisture_content_vadose_zone=0.15,
ground_surface = 22,
thickness_vadose_zone_at_boundary=5,
thickness_shallow_aquifer=10,
thickness_target_aquifer=40,
hor_permeability_target_aquifer=35,
# KD=1400,
thickness_full_capillary_fringe=0.4,
temperature=11,
solid_density_vadose_zone= 2.650,
solid_density_shallow_aquifer= 2.650,
solid_density_target_aquifer= 2.650,
diameter_borehole = 0.75,
)
well1 = AnalyticalWell(test_)
well1.phreatic()
output = well1.df_output
output = output[["total_travel_time", "travel_time_unsaturated",
"travel_time_shallow_aquifer", "travel_time_target_aquifer",
"radial_distance", ]]
output = output.round(7)
assert_frame_equal(output, output_phreatic,check_dtype=False)
def test_retardation_temp_koc_correction(substance = 'benzene', schematisation_type='phreatic'):
""" Compares the calculated retardation coefficient for each redox zone against a known case from TRANSATOMIC excel """
test_ = HydroChemicalSchematisation(schematisation_type=schematisation_type,
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24,
hor_permeability_shallow_aquifer = 0.02,
porosity_vadose_zone=0.38,
porosity_shallow_aquifer=0.35,
porosity_target_aquifer=0.35,
recharge_rate=0.3/365.25,
moisture_content_vadose_zone=0.15,
ground_surface = 22,
thickness_vadose_zone_at_boundary=5,
thickness_shallow_aquifer=10,
thickness_target_aquifer=40,
hor_permeability_target_aquifer=35,
thickness_full_capillary_fringe=0.4,
redox_vadose_zone='anoxic', #'suboxic',
redox_shallow_aquifer='anoxic',
redox_target_aquifer='deeply_anoxic',
pH_vadose_zone=5,
pH_shallow_aquifer=6,
pH_target_aquifer=7,
dissolved_organic_carbon_vadose_zone=10,
dissolved_organic_carbon_shallow_aquifer=4,
dissolved_organic_carbon_target_aquifer=2,
fraction_organic_carbon_vadose_zone=0.001,
fraction_organic_carbon_shallow_aquifer=0.0005,
fraction_organic_carbon_target_aquifer=0.0005,
diffuse_input_concentration = 100,
temperature=11,
solid_density_vadose_zone= 2.650,
solid_density_shallow_aquifer= 2.650,
solid_density_target_aquifer= 2.650,
diameter_borehole = 0.75,
)
well1 = AnalyticalWell(test_)
if schematisation_type=='phreatic':
well1.phreatic()
elif schematisation_type=='semiconfined':
well1.semiconfined()
conc1 = SubstanceTransport(well1, substance = substance) #, df_particle, df_flowline)
conc1.compute_omp_removal()
retardation = {
'benzene': {
'vadose_zone': 1.57866594,
'shallow_aquifer': 1.32938582,
'target_aquifer': 1.32940346,
},
'benzo(a)pyrene': {
'vadose_zone': 1939.142373,
'shallow_aquifer': 2388.097816,
'target_aquifer': 3901.698980,
},
'AMPA' :{
'vadose_zone': 1.0000000763015349,
'shallow_aquifer': 1.000000004342605, #1.0000000004342615,
'target_aquifer': 1.0000000004342615,
},
}
retardation_array = np.array([retardation[substance]['vadose_zone'],
retardation[substance]['shallow_aquifer'],
retardation[substance]['target_aquifer']])
test_array = np.array(conc1.df_particle.retardation.loc[1:3], dtype='float')
try:
# assert output == output_phreatic
np.testing.assert_allclose(test_array,
retardation_array ),
# rtol=1e-8, atol=1e-8)
except AssertionError:
print("Assertion Exception Raised - retardation test")
else:
print("Success, no error in retardation!")
def test_steady_concentration_temp_koc_correction_phreatic(substance='benzene'):
""" Compares the calculated steady state concentration for a specific radial distance
for each redox zone against a known case from TRANSATOMIC excel """
test_ = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24,
# vertical_resistance_shallow_aquifer=500,
hor_permeability_shallow_aquifer = 0.02,
porosity_vadose_zone=0.38,
porosity_shallow_aquifer=0.35,
porosity_target_aquifer=0.35,
recharge_rate=0.3/365.25,
moisture_content_vadose_zone=0.15,
ground_surface = 22,
thickness_vadose_zone_at_boundary=5,
thickness_shallow_aquifer=10,
thickness_target_aquifer=40,
hor_permeability_target_aquifer=35,
thickness_full_capillary_fringe=0.4,
redox_vadose_zone='anoxic', #'suboxic',
redox_shallow_aquifer='anoxic',
redox_target_aquifer='deeply_anoxic',
pH_vadose_zone=5,
pH_shallow_aquifer=6,
pH_target_aquifer=7,
dissolved_organic_carbon_vadose_zone=10,
dissolved_organic_carbon_shallow_aquifer=4,
dissolved_organic_carbon_target_aquifer=2,
fraction_organic_carbon_vadose_zone=0.001,
fraction_organic_carbon_shallow_aquifer=0.0005,
fraction_organic_carbon_target_aquifer=0.0005,
diffuse_input_concentration = 100,
temperature=11,
solid_density_vadose_zone= 2.650,
solid_density_shallow_aquifer= 2.650,
solid_density_target_aquifer= 2.650,
diameter_borehole = 0.75,
)
well1 = AnalyticalWell(test_)
well1.phreatic()
# substance = 'benzene'
conc1 = SubstanceTransport(well1, substance = substance) #, df_particle, df_flowline)
conc1.compute_omp_removal()
steady_state_concentration = {
'benzene': {
'vadose_zone': 10.744926872632352,
'shallow_aquifer': 1.3763989974870514,
'target_aquifer': 1.3763989974870514,
},
'benzo(a)pyrene': {
'vadose_zone': 0,
'shallow_aquifer': 0,
'target_aquifer': 0,
},
'AMPA' :{
'vadose_zone': 0.000249362,
'shallow_aquifer': 1.850450098e-10,#1.8504500983690007e-10,
'target_aquifer': 1.850450098e-10, #1.8504500983690007e-10,
},
}
concentration_array = np.array([steady_state_concentration[substance]['vadose_zone'],
steady_state_concentration[substance]['shallow_aquifer'],
steady_state_concentration[substance]['target_aquifer']])
test_array = np.array(conc1.df_particle.steady_state_concentration.loc[1:3], dtype=float)
try:
# assert output == output_phreatic
# assert_frame_equal(test_array,concentration_array,check_dtype=False)
np.testing.assert_allclose(test_array,
concentration_array,
rtol=1e-8, atol=1e-8)
except AssertionError:
print("Assertion Exception Raised - concetration test")
else:
print("Success, no error in concetration!")
# %%
def test_travel_time_distribution_semiconfined():
""" Compares the calculated travel times (total, unsaturated zone, shallow aquifer and target aquifer)
against a known case from TRANSATOMIC excel """
# output_semiconfined = pd.read_csv(path / 'semiconfined_test.csv')
output_semiconfined = pd.read_csv(path / 'semiconfined_test_fixed_TTD.csv')
output_semiconfined = output_semiconfined.round(7)
test_ = HydroChemicalSchematisation(schematisation_type='semiconfined',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24,
# vertical_resistance_shallow_aquifer=500,
hor_permeability_shallow_aquifer = 0.02,
porosity_vadose_zone=0.38,
porosity_shallow_aquifer=0.35,
porosity_target_aquifer=0.35,
recharge_rate=0.3/365.25,
moisture_content_vadose_zone=0.15,
ground_surface = 22,
thickness_vadose_zone_at_boundary=5,
thickness_shallow_aquifer=10,
thickness_target_aquifer=40,
hor_permeability_target_aquifer=35,
# KD=1400,
thickness_full_capillary_fringe=0.4,
temperature=11,
solid_density_vadose_zone= 2.650,
solid_density_shallow_aquifer= 2.650,
solid_density_target_aquifer= 2.650,
diameter_borehole = 0.75,)
well1 = AnalyticalWell(test_)
well1.semiconfined()
output = well1.df_output
# output = output_dict['df_output']
output = output[["total_travel_time", "travel_time_unsaturated",
"travel_time_shallow_aquifer", "travel_time_target_aquifer",
"radial_distance",]]
# try:
# assert output == output_semiconfirned
assert_frame_equal(output, output_semiconfined,
check_dtype=False)
# assert output ==1
# except AssertionError:
# print("Assertion Exception Raised - in TTD test")
# else:
# print("Success, no error in TTD!")
def test_steady_concentration_temp_koc_correction_semiconfined(substance='benzene'):
""" Compares the calculated retardation coefficient for each redox zone against a known case from TRANSATOMIC excel """
test_ = HydroChemicalSchematisation(schematisation_type='semiconfined',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24,
hor_permeability_shallow_aquifer = 0.02,
porosity_vadose_zone=0.38,
porosity_shallow_aquifer=0.35,
porosity_target_aquifer=0.35,
recharge_rate=0.3/365.25,
moisture_content_vadose_zone=0.15,
ground_surface = 22,
thickness_vadose_zone_at_boundary=5,
thickness_shallow_aquifer=10,
thickness_target_aquifer=40,
hor_permeability_target_aquifer=35,
thickness_full_capillary_fringe=0.4,
redox_vadose_zone='anoxic', #'suboxic',
redox_shallow_aquifer='anoxic',
redox_target_aquifer='deeply_anoxic',
pH_vadose_zone=5,
pH_shallow_aquifer=6,
pH_target_aquifer=7,
dissolved_organic_carbon_vadose_zone=10,
dissolved_organic_carbon_shallow_aquifer=4,
dissolved_organic_carbon_target_aquifer=2,
fraction_organic_carbon_vadose_zone=0.001,
fraction_organic_carbon_shallow_aquifer=0.0005,
fraction_organic_carbon_target_aquifer=0.0005,
diffuse_input_concentration = 100,
temperature=11,
solid_density_vadose_zone= 2.650,
solid_density_shallow_aquifer= 2.650,
solid_density_target_aquifer= 2.650,
diameter_borehole = 0.75,
)
well1 = AnalyticalWell(test_)
well1.semiconfined()
# substance = 'benzene'
conc1 = SubstanceTransport(well1, substance = substance) #, df_particle, df_flowline)
conc1.compute_omp_removal()
steady_state_concentration = {
'benzene': {
'vadose_zone': 30.78934144,
'shallow_aquifer': 21.11155403,
'target_aquifer': 21.11155403,
},
'benzo(a)pyrene': {
'vadose_zone': 0,
'shallow_aquifer': 0,
'target_aquifer': 0,
},
'AMPA' :{
'vadose_zone': 0.109923889,
'shallow_aquifer': 0.008232593,
'target_aquifer':0.008232593,
},
}
concentration_array = np.array([steady_state_concentration[substance]['vadose_zone'],
steady_state_concentration[substance]['shallow_aquifer'],
steady_state_concentration[substance]['target_aquifer']])
test_array = np.array(conc1.df_particle.steady_state_concentration.loc[1:3], dtype=float)
try:
# assert output == output_phreatic
# assert_frame_equal(test_array,concentration_array,check_dtype=False)
np.testing.assert_allclose(test_array,
concentration_array,
rtol=1e-8, atol=1e-8)
except AssertionError:
print("Assertion Exception Raised - concetration test")
else:
print("Success, no error in concetration!")
# %%
def test_start_end_dates_contamination():
''' Tests whether the correct exception is raised when the 'end_date_contamiantion' is before 'start_date_contamination' '''
with pytest.raises(ValueError) as exc:
phreatic_scheme = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24, #m3/day
recharge_rate=0.3/365.25, #m/day
start_date_contamination= dt.datetime.strptime('1990-01-01', "%Y-%m-%d") ,
end_date_contamination= dt.datetime.strptime('1950-01-01', "%Y-%m-%d"), #'1950-01-01'
)
assert 'Error, "end_date_contamination" is before "start_date_contamination". Please enter an new "end_date_contamination" or "start_date_contamination" ' in str(exc.value)
#%%
def test_compute_for_date_start_dates_contamination():
''' Tests whether the correct exception is raised when the 'computer_contamiantion_for_date' is before 'start_date_contamination' '''
with pytest.raises(ValueError) as exc:
phreatic_scheme = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24, #m3/day
recharge_rate=0.3/365.25, #m/day
start_date_contamination=dt.datetime.strptime('1960-01-01', "%Y-%m-%d") ,
end_date_contamination= dt.datetime.strptime('1990-01-01', "%Y-%m-%d"),
compute_contamination_for_date= dt.datetime.strptime('1950-01-01', "%Y-%m-%d")
)
assert 'Error, "compute_contamination_for_date" is before "start_date_contamination". Please enter an new "compute_contamination_for_date" or "start_date_contamination" ' in str(exc.value)
#%%
def test_compute_for_date_start_date_well():
''' Tests whether the correct exception is raised when the
'computer_contamiantion_for_date' is before 'start_date_contamination' '''
with pytest.raises(ValueError) as exc:
phreatic_scheme = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24, #m3/day
recharge_rate=0.3/365.25, #m/day
start_date_contamination=dt.datetime.strptime('1950-01-01', "%Y-%m-%d") ,
end_date_contamination= dt.datetime.strptime('1990-01-01', "%Y-%m-%d"),
compute_contamination_for_date= dt.datetime.strptime('1960-01-01', "%Y-%m-%d"),
start_date_well= dt.datetime.strptime('1975-01-01', "%Y-%m-%d"),
)
assert 'Error, "compute_contamination_for_date" is before "start_date_well". Please enter an new "compute_contamination_for_date" or "start_date_well" ' in str(exc.value)
#%%
def test_incorrect_date_input_format():
''' Tests whether the correct exception is raised when the
'computer_contamiantion_for_date' is before 'start_date_contamination' '''
with pytest.raises(TypeError) as exc:
phreatic_scheme = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24, #m3/day
recharge_rate=0.3/365.25, #m/day
start_date_well='1950-01-01',
)
assert "Error invalid date input, please enter a new start_date_well using the format dt.datetime.strptime('YYYY-MM-DD', '%Y-%m-%d')" in str(exc.value)
#%%
def test_redox_options():
''' Tests whether the correct exception is raised when one of the redox zones
is not one of'suboxic', 'anoxic', 'deeply_anoxic' '''
with pytest.raises(ValueError) as exc:
phreatic_scheme = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24, #m3/day
recharge_rate=0.3/365.25, #m/day
redox_vadose_zone='oxic',
redox_shallow_aquifer='anoxic',
redox_target_aquifer='deeply_anoxic',
)
assert "Invalid redox_vadose_zone. Expected one of: ['suboxic', 'anoxic', 'deeply_anoxic']" in str(exc.value)
#%%
def test_phreatic_diffuse_point_source():
''' Test for phreatic case with both a diffuse and point source contamination,
Checks if the concentration over time matches a known case'''
phreatic_scheme = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24, #m3/day
porosity_vadose_zone=0.38,
porosity_shallow_aquifer=0.35,
porosity_target_aquifer=0.35,
recharge_rate=0.3/365.25, #m/day
moisture_content_vadose_zone=0.15,
ground_surface=22,
thickness_vadose_zone_at_boundary=5,
thickness_shallow_aquifer=10,
thickness_target_aquifer=40,
hor_permeability_target_aquifer=35,
thickness_full_capillary_fringe=0.4,
redox_vadose_zone='suboxic',
redox_shallow_aquifer='anoxic',
redox_target_aquifer='deeply_anoxic',
pH_vadose_zone=5,
pH_shallow_aquifer=6,
pH_target_aquifer=7,
dissolved_organic_carbon_vadose_zone=10,
dissolved_organic_carbon_shallow_aquifer=4,
dissolved_organic_carbon_target_aquifer=2,
fraction_organic_carbon_vadose_zone=0.001,
fraction_organic_carbon_shallow_aquifer=0.0005,
fraction_organic_carbon_target_aquifer=0.0005,
temperature=11,
solid_density_vadose_zone=2.650,
solid_density_shallow_aquifer=2.650,
solid_density_target_aquifer=2.650,
diameter_borehole=0.75,
#diffuse parameters
diffuse_input_concentration=100, #ug/L
#point paramters
point_input_concentration=100,
distance_point_contamination_from_well=25,
depth_point_contamination=21, #m ASL
discharge_point_contamination=-1000,
#dates
start_date_well=dt.datetime.strptime('1968-01-01',"%Y-%m-%d"),
start_date_contamination= dt.datetime.strptime('1966-01-01',"%Y-%m-%d"),
compute_contamination_for_date=dt.datetime.strptime('2050-01-01',"%Y-%m-%d"),
end_date_contamination=dt.datetime.strptime('1990-01-01',"%Y-%m-%d"),
)
phreatic_well = AnalyticalWell(phreatic_scheme)
phreatic_well.phreatic()
phreatic_conc = SubstanceTransport(phreatic_well, substance = 'OMP-X')
phreatic_conc.compute_omp_removal()
df_well_concentration = phreatic_conc.compute_concentration_in_well_at_date()
df_well_concentration_test = read_csv(path / 'phreatic_diffuse_point_test.csv', index_col=0)
# AH the assert frame_equal is being difficult, so have to specify each data type
df_well_concentration_test = df_well_concentration_test.astype({'time': 'int32', 'date': 'datetime64[ns]', 'total_concentration_in_well': 'float64'}) #etc
assert_frame_equal(df_well_concentration, df_well_concentration_test, check_dtype=False)
#%%
def test_phreatic_diffuse_only_source():
''' Test for phreatic case with only a diffuse source contamination
Checks if the concentration over time matches a known case'''
phreatic_scheme = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24, #m3/day
porosity_vadose_zone=0.38,
porosity_shallow_aquifer=0.35,
porosity_target_aquifer=0.35,
recharge_rate=0.3/365.25, #m/day
moisture_content_vadose_zone=0.15,
ground_surface=22,
thickness_vadose_zone_at_boundary=5,
thickness_shallow_aquifer=10,
thickness_target_aquifer=40,
hor_permeability_target_aquifer=35,
thickness_full_capillary_fringe=0.4,
redox_vadose_zone='suboxic',
redox_shallow_aquifer='anoxic',
redox_target_aquifer='deeply_anoxic',
pH_vadose_zone=5,
pH_shallow_aquifer=6,
pH_target_aquifer=7,
dissolved_organic_carbon_vadose_zone=10,
dissolved_organic_carbon_shallow_aquifer=4,
dissolved_organic_carbon_target_aquifer=2,
fraction_organic_carbon_vadose_zone=0.001,
fraction_organic_carbon_shallow_aquifer=0.0005,
fraction_organic_carbon_target_aquifer=0.0005,
temperature=11,
solid_density_vadose_zone=2.650,
solid_density_shallow_aquifer=2.650,
solid_density_target_aquifer=2.650,
diameter_borehole=0.75,
#diffuse parameters
diffuse_input_concentration=100, #ug/L
#dates
start_date_well=dt.datetime.strptime('1968-01-01',"%Y-%m-%d"),
start_date_contamination= dt.datetime.strptime('1966-01-01',"%Y-%m-%d"),
compute_contamination_for_date=dt.datetime.strptime('2050-01-01',"%Y-%m-%d"),
end_date_contamination=dt.datetime.strptime('1990-01-01',"%Y-%m-%d"),
)
phreatic_well = AnalyticalWell(phreatic_scheme)
phreatic_well.phreatic()
phreatic_conc = SubstanceTransport(phreatic_well, substance = 'OMP-X')
phreatic_conc.compute_omp_removal()
df_well_concentration = phreatic_conc.compute_concentration_in_well_at_date()
df_well_concentration_test = read_csv(path / 'phreatic_diffuse_only_test.csv', index_col=0)
# AH the assert frame_equal is being difficult, so have to specify each data type
df_well_concentration_test = df_well_concentration_test.astype({'time': 'int32', 'date': 'datetime64[ns]', 'total_concentration_in_well': 'float64'})
assert_frame_equal(df_well_concentration, df_well_concentration_test, check_dtype=False)
#%%
def test_phreatic_point_only_source():
''' Test for phreatic case with only a diffuse source contamination,
Checks if the concentration over time matches a known case'''
phreatic_scheme = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp',
well_discharge=-319.4*24, #m3/day
porosity_vadose_zone=0.38,
porosity_shallow_aquifer=0.35,
porosity_target_aquifer=0.35,
recharge_rate=0.3/365.25, #m/day
moisture_content_vadose_zone=0.15,
ground_surface=22,
thickness_vadose_zone_at_boundary=5,
thickness_shallow_aquifer=10,
thickness_target_aquifer=40,
hor_permeability_target_aquifer=35,
thickness_full_capillary_fringe=0.4,
redox_vadose_zone='suboxic',
redox_shallow_aquifer='anoxic',
redox_target_aquifer='deeply_anoxic',
pH_vadose_zone=5,
pH_shallow_aquifer=6,
pH_target_aquifer=7,
dissolved_organic_carbon_vadose_zone=10,
dissolved_organic_carbon_shallow_aquifer=4,
dissolved_organic_carbon_target_aquifer=2,
fraction_organic_carbon_vadose_zone=0.001,
fraction_organic_carbon_shallow_aquifer=0.0005,
fraction_organic_carbon_target_aquifer=0.0005,
temperature=11,
solid_density_vadose_zone=2.650,
solid_density_shallow_aquifer=2.650,
solid_density_target_aquifer=2.650,
diameter_borehole=0.75,
#diffuse parameters
diffuse_input_concentration=0, #ug/L
#point paramters
point_input_concentration=100,
distance_point_contamination_from_well=25,
depth_point_contamination=21, #m ASL
discharge_point_contamination=-1000,
#dates
start_date_well=dt.datetime.strptime('1968-01-01', "%Y-%m-%d"),
start_date_contamination= dt.datetime.strptime('1966-01-01', "%Y-%m-%d"),
compute_contamination_for_date=dt.datetime.strptime('2050-01-01', "%Y-%m-%d"),
end_date_contamination=dt.datetime.strptime('1990-01-01', "%Y-%m-%d"),
)
phreatic_well = AnalyticalWell(phreatic_scheme)
phreatic_well.phreatic()
phreatic_conc = SubstanceTransport(phreatic_well, substance = 'OMP-X')
phreatic_conc.compute_omp_removal()
df_well_concentration = phreatic_conc.compute_concentration_in_well_at_date()
df_well_concentration.to_csv('phreatic_point_only_test.csv')
df_well_concentration_test = read_csv(path / 'phreatic_point_only_test.csv', index_col=0)
# AH the assert frame_equal is being difficult, so have to specify each data type
df_well_concentration_test = df_well_concentration_test.astype({'time': 'int32', 'date': 'datetime64[ns]', 'total_concentration_in_well': 'float64'})
assert_frame_equal(df_well_concentration, df_well_concentration_test, check_dtype=False)
def test_drawdown_lower_than_target_aquifer():
''' Tests whether the correct exception is raised when the drawdown of the
well is lower than the bottom of the target aquifer' '''
with pytest.raises(ValueError) as exc:
phreatic_scheme = HydroChemicalSchematisation(schematisation_type='phreatic',
computation_method= 'analytical',
what_to_export='omp', # @alex: what_to_export sounds very cryptic and ad-hoc. maybe we can think of something better
well_discharge=-319.4*24,
# vertical_resistance_shallow_aquifer=500,
hor_permeability_shallow_aquifer = 0.02,
porosity_vadose_zone=0.38,
porosity_shallow_aquifer=0.35,
porosity_target_aquifer=0.35,
recharge_rate=0.3/365.25,
moisture_content_vadose_zone=0.15,
ground_surface = 22,)
phreatic_well = AnalyticalWell(phreatic_scheme)
phreatic_well.phreatic()
assert "The drawdown at the well is lower than the bottom of the target aquifer. Please select a different schematisation." in str(exc.value)
# def test_warning_drawdown_in_target_aquifer():
# ''' Tests whether a warning is issued when the head drawdown reaches the target aquifer' '''
# @MartinK how to raise a warning here?
# with AnalyticalWell.assertWarns(Warning) as exc:
# phreatic_scheme = HydroChemicalSchematisation(schematisation_type='phreatic',
# computation_method= 'analytical',
# what_to_export='omp', # @alex: what_to_export sounds very cryptic and ad-hoc. maybe we can think of something better
# well_discharge=-319.4*24,
# # vertical_resistance_shallow_aquifer=500,
# hor_permeability_shallow_aquifer = 0.02,
# porosity_vadose_zone=0.38,
# porosity_shallow_aquifer=0.35,
# porosity_target_aquifer=0.35,
# recharge_rate=0.3/365.25,
# moisture_content_vadose_zone=0.15,
# ground_surface = 22,
# thickness_vadose_zone_at_boundary=1,
# thickness_shallow_aquifer=1,
# thickness_target_aquifer=20,
# hor_permeability_target_aquifer=35,
# thickness_full_capillary_fringe=0.4,
# temperature=11,
# solid_density_vadose_zone= 2.650,
# solid_density_shallow_aquifer= 2.650,
# solid_density_target_aquifer= 2.650,
# diameter_borehole = 0.75,
# )
# assert 'The drawdown is lower than the bottom of the shallow aquifer' in str(exc.value)
| 58.249263
| 192
| 0.509103
| 3,527
| 39,493
| 5.360079
| 0.096116
| 0.057075
| 0.017456
| 0.006665
| 0.881566
| 0.867231
| 0.844168
| 0.830521
| 0.818196
| 0.808728
| 0
| 0.05914
| 0.421999
| 39,493
| 677
| 193
| 58.335303
| 0.769045
| 0.148634
| 0
| 0.781496
| 0
| 0.009843
| 0.09147
| 0.020883
| 0
| 0
| 0
| 0
| 0.041339
| 1
| 0.027559
| false
| 0
| 0.017717
| 0
| 0.045276
| 0.011811
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a8d274983dfdaadbd2bd3de2a15e984cea35a9ce
| 15
|
py
|
Python
|
test.py
|
aaronhubik/stat624demo
|
9b3d7651bef098db75fece584dfa8143164ff243
|
[
"MIT"
] | null | null | null |
test.py
|
aaronhubik/stat624demo
|
9b3d7651bef098db75fece584dfa8143164ff243
|
[
"MIT"
] | null | null | null |
test.py
|
aaronhubik/stat624demo
|
9b3d7651bef098db75fece584dfa8143164ff243
|
[
"MIT"
] | null | null | null |
echo "x=5"
2+2
| 5
| 10
| 0.533333
| 5
| 15
| 1.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.2
| 15
| 3
| 11
| 5
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7683f796435e92823e27e11ae2aca25f41d33f63
| 28,027
|
py
|
Python
|
tests/test_dyson_pure_cool_v2.py
|
danjamker/libpurecoollink
|
52aec209d25281128d7819e42eaf9aeb14dff7a0
|
[
"Apache-2.0"
] | 13
|
2018-06-10T07:58:39.000Z
|
2020-06-28T13:00:15.000Z
|
tests/test_dyson_pure_cool_v2.py
|
josh64x2/libpurecoollink
|
65f814f52776a34bc8bafb704c90731b01435686
|
[
"Apache-2.0"
] | 2
|
2018-07-20T09:56:47.000Z
|
2018-07-23T15:07:14.000Z
|
tests/test_dyson_pure_cool_v2.py
|
josh64x2/libpurecoollink
|
65f814f52776a34bc8bafb704c90731b01435686
|
[
"Apache-2.0"
] | 7
|
2018-07-20T09:22:56.000Z
|
2020-06-28T13:00:19.000Z
|
import json
import unittest
from unittest import mock
from unittest.mock import Mock
from libpurecoollink.const import FanPower, FrontalDirection, AutoMode, \
OscillationV2, NightMode, ContinuousMonitoring, \
FanSpeed, ResetFilter, DYSON_PURE_COOL, SLEEP_TIMER_OFF
from libpurecoollink.dyson_device import NetworkDevice
from libpurecoollink.dyson_pure_cool import DysonPureCool
from libpurecoollink.dyson_pure_state_v2 import \
DysonPureCoolV2State, DysonEnvironmentalSensorV2State
def _mocked_send_command(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['fpwr'] == "ON"
assert payload['data']['fdir'] == "ON"
assert payload['data']['auto'] == "ON"
assert payload['data']['oson'] == "OION"
assert payload['data']['nmod'] == "ON"
assert payload['data']['rhtm'] == "ON"
assert payload['data']['fnsp'] == "0007"
assert payload['data']['sltm'] == "240"
assert payload['data']['ancp'] == "CUST"
assert payload['data']['osal'] == "110"
assert payload['data']['osau'] == "150"
assert payload['data']['rstf'] == "STET"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_default(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['fpwr'] == "OFF"
assert payload['data']['fdir'] == "OFF"
assert payload['data']['auto'] == "OFF"
assert payload['data']['oson'] == "OIOF"
assert payload['data']['nmod'] == "OFF"
assert payload['data']['rhtm'] == "OFF"
assert payload['data']['fnsp'] == "AUTO"
assert payload['data']['ancp'] == "CUST"
assert payload['data']['osal'] == "0063"
assert payload['data']['osau'] == "0243"
assert payload['data']['rstf'] == "STET"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_turn_on(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['fpwr'] == "ON"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_turn_off(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['fpwr'] == "OFF"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_oscillation_on_empty(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['oson'] == "OION"
assert payload['data']['fpwr'] == "ON"
assert payload['data']['ancp'] == "CUST"
assert payload['data']['osal'] == "0063"
assert payload['data']['osau'] == "0243"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_oscillation_on(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['oson'] == "OION"
assert payload['data']['fpwr'] == "ON"
assert payload['data']['ancp'] == "CUST"
assert payload['data']['osal'] == "0120"
assert payload['data']['osau'] == "0150"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_oscillation_on_equal(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['oson'] == "OION"
assert payload['data']['fpwr'] == "ON"
assert payload['data']['ancp'] == "CUST"
assert payload['data']['osal'] == "0120"
assert payload['data']['osau'] == "0120"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_oscillation_off(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['oson'] == "OIOF"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_timer_on(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['sltm'] == "0540"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_timer_off(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['sltm'] == "OFF"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_set_speed(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['fnsp'] == "0007"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_front_on(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['fdir'] == "ON"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_front_off(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['fdir'] == "OFF"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_auto_on(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['auto'] == "ON"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_auto_off(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['auto'] == "OFF"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_night_mode_on(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['nmod'] == "ON"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
def _mocked_send_command_night_mode_off(*args):
assert args[0] == '{0}/device-id-1/command'.format(DYSON_PURE_COOL)
payload = json.loads(args[1])
if payload['msg'] == "STATE-SET":
assert payload['time']
assert payload['data']['nmod'] == "OFF"
assert payload['mode-reason'] == "LAPP"
assert payload['msg'] == "STATE-SET"
assert args[2] == 1
class TestPureCool(unittest.TestCase):
def setUp(self):
device = DysonPureCool({
"Serial": "device-id-1",
"Name": "device-1",
"ScaleUnit": "SU01",
"Version": "21.03.08",
"LocalCredentials": "1/aJ5t52WvAfn+z+fjDuef86kQDQPefbQ6/70ZGysII1K"
"e1i0ZHakFH84DZuxsSQ4KTT2vbCm7uYeTORULKLKQ==",
"AutoUpdate": True,
"NewVersionAvailable": False,
"ProductType": DYSON_PURE_COOL
})
network_device = NetworkDevice('device-1', 'host', 1111)
device._add_network_device(network_device)
device._current_state = DysonPureCoolV2State(
open("tests/data/state_pure_cool.json", "r").read())
device.connection_callback(True)
device.state_data_available()
device.sensor_data_available()
self._device = device
def tearDown(self):
pass
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command)
@mock.patch('paho.mqtt.client.Client.connect')
def test_set_configuration(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device\
.set_configuration(fan_power=FanPower.POWER_ON,
front_direction=FrontalDirection.FRONTAL_ON,
auto_mode=AutoMode.AUTO_ON,
oscillation=OscillationV2.OSCILLATION_ON,
night_mode=NightMode.NIGHT_MODE_ON,
continuous_monitoring=ContinuousMonitoring.
MONITORING_ON,
fan_speed=FanSpeed.FAN_SPEED_7,
sleep_timer=240,
oscillation_angle_low=110,
oscillation_angle_high=150,
reset_filter=ResetFilter.DO_NOTHING,
)
self.assertEqual(mocked_publish.call_count, 3)
self.assertEqual(self._device.__repr__(),
"DysonPureCool(serial=device-id-1,active=None,"
"name=device-1,version=21.03.08,auto_update=True,"
"new_version_available=False,product_type=438,"
"network_device=NetworkDevice(name=device-1,"
"address=host,port=1111))")
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_default)
@mock.patch('paho.mqtt.client.Client.connect')
def test_set_configuration_empty(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.set_configuration()
self.assertEqual(mocked_publish.call_count, 3)
self.assertEqual(self._device.__repr__(),
"DysonPureCool(serial=device-id-1,active=None,"
"name=device-1,version=21.03.08,auto_update=True,"
"new_version_available=False,product_type=438,"
"network_device=NetworkDevice(name=device-1,"
"address=host,port=1111))")
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_turn_on)
@mock.patch('paho.mqtt.client.Client.connect')
def test_turn_on(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.turn_on()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_turn_off)
@mock.patch('paho.mqtt.client.Client.connect')
def test_turn_off(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.turn_off()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_oscillation_on_empty)
@mock.patch('paho.mqtt.client.Client.connect')
def test_turn_oscillation_on_empty(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.enable_oscillation()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_oscillation_on)
@mock.patch('paho.mqtt.client.Client.connect')
def test_turn_oscillation_on(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.enable_oscillation(120, 150)
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_oscillation_on_equal)
@mock.patch('paho.mqtt.client.Client.connect')
def test_turn_oscillation_on_equal(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.enable_oscillation(120, 120)
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.connect')
def test_oson_wrong_args_raise_errors(self, mocked_connect):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self.assertRaises(TypeError,
self._device.enable_oscillation, "test", 160)
self.assertRaises(TypeError,
self._device.enable_oscillation, 160, "test")
self.assertRaises(ValueError,
self._device.enable_oscillation, 1, 110)
self.assertRaises(ValueError,
self._device.enable_oscillation, 356, 110)
self.assertRaises(ValueError,
self._device.enable_oscillation, 110, 1)
self.assertRaises(ValueError,
self._device.enable_oscillation, 110, 356)
self.assertRaises(ValueError,
self._device.enable_oscillation, 355, 5)
self.assertRaises(ValueError,
self._device.enable_oscillation, 110, 129)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_oscillation_off)
@mock.patch('paho.mqtt.client.Client.connect')
def test_oscillation_off(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.disable_oscillation()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_timer_on)
@mock.patch('paho.mqtt.client.Client.connect')
def test_timer_on(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.enable_sleep_timer(540)
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.connect')
def test_sltm_wrong_arg_rise_errors(self, mocked_connect):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self.assertRaises(TypeError,
self._device.enable_sleep_timer)
self.assertRaises(TypeError,
self._device.enable_sleep_timer, "test")
self.assertRaises(ValueError,
self._device.enable_sleep_timer, 0)
self.assertRaises(ValueError,
self._device.enable_sleep_timer, 541)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_timer_off)
@mock.patch('paho.mqtt.client.Client.connect')
def test_timer_off(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.disable_sleep_timer()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_set_speed)
@mock.patch('paho.mqtt.client.Client.connect')
def test_set_speed(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.set_fan_speed(FanSpeed.FAN_SPEED_7)
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.connect')
def test_set_speed_wrong_value_raise_error(self, mocked_connect):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self.assertRaises(TypeError,
self._device.set_fan_speed, "test")
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_front_on)
@mock.patch('paho.mqtt.client.Client.connect')
def test_front_on(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.enable_frontal_direction()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_front_off)
@mock.patch('paho.mqtt.client.Client.connect')
def test_front_off(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.disable_frontal_direction()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_auto_on)
@mock.patch('paho.mqtt.client.Client.connect')
def test_auto_on(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.enable_auto_mode()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_auto_off)
@mock.patch('paho.mqtt.client.Client.connect')
def test_auto_off(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.disable_auto_mode()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_night_mode_on)
@mock.patch('paho.mqtt.client.Client.connect')
def test_night_mode_on(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.enable_night_mode()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
@mock.patch('paho.mqtt.client.Client.publish',
side_effect=_mocked_send_command_night_mode_off)
@mock.patch('paho.mqtt.client.Client.connect')
def test_night_mode_off(self, mocked_connect, mocked_publish):
connected = self._device.auto_connect()
self.assertTrue(connected)
self.assertEqual(mocked_connect.call_count, 1)
self._device.disable_night_mode()
self.assertEqual(mocked_publish.call_count, 3)
self._device.disconnect()
def test_dyson_v2_state(self):
dyson_state = DysonPureCoolV2State(
open("tests/data/state_pure_cool.json", "r").read())
self.assertEqual(dyson_state.fan_power, FanPower.POWER_OFF.value)
self.assertEqual(dyson_state.front_direction,
FrontalDirection.FRONTAL_OFF.value)
self.assertEqual(dyson_state.auto_mode, AutoMode.AUTO_OFF.value)
self.assertEqual(dyson_state.oscillation_status, "OFF")
self.assertEqual(dyson_state.oscillation,
OscillationV2.OSCILLATION_OFF.value)
self.assertEqual(dyson_state.night_mode,
NightMode.NIGHT_MODE_OFF.value)
self.assertEqual(dyson_state.continuous_monitoring,
ContinuousMonitoring.MONITORING_OFF.value)
self.assertEqual(dyson_state.fan_state, "FAN")
self.assertEqual(dyson_state.night_mode_speed, "0004")
self.assertEqual(dyson_state.speed, FanSpeed.FAN_SPEED_AUTO.value)
self.assertEqual(dyson_state.carbon_filter_state, "0100")
self.assertEqual(dyson_state.hepa_filter_state, "0100")
self.assertEqual(dyson_state.sleep_timer, SLEEP_TIMER_OFF)
self.assertEqual(dyson_state.oscillation_angle_low, "0063")
self.assertEqual(dyson_state.oscillation_angle_high, "0243")
self.assertEqual(dyson_state.__repr__(),
"DysonPureCoolV2State(fan_power=OFF,"
"front_direction=OFF,auto_mode=OFF,"
"oscillation_status=OFF,oscillation=OIOF,"
"night_mode=OFF,continuous_monitoring=OFF,"
"fan_state=FAN,night_mode_speed=0004,"
"speed=AUTO,carbon_filter_state=0100,"
"hepa_filter_state=0100,sleep_timer=OFF,"
"oscillation_angle_low=0063,"
"oscillation_angle_high=0243)")
def test_dyson_v2_sensor_state(self):
dyson_sensor_state = DysonEnvironmentalSensorV2State(
open("tests/data/sensor_pure_cool.json", "r").read())
self.assertEqual(dyson_sensor_state.temperature, 297.7)
self.assertEqual(dyson_sensor_state.humidity, 58)
self.assertEqual(dyson_sensor_state.particulate_matter_25, 9)
self.assertEqual(dyson_sensor_state.particulate_matter_10, 5)
self.assertEqual(dyson_sensor_state.volatile_organic_compounds, 4)
self.assertEqual(dyson_sensor_state.volatile_organic_compounds, 4)
self.assertEqual(dyson_sensor_state.p25r, 10)
self.assertEqual(dyson_sensor_state.p10r, 9)
self.assertEqual(dyson_sensor_state.__repr__(),
"DysonEnvironmentalSensorV2State("
"temperature=297.7,humidity=58,"
"particulate_matter_25=9,particulate_matter_10=5,"
"volatile_organic_compounds=4,nitrogen_dioxide=11,"
"p25r=10,p10r=9,sleep_timer=0)")
def test_dyson_v2_sensor_state_off(self):
dyson_sensor_state = DysonEnvironmentalSensorV2State(
open("tests/data/sensor_pure_cool_off.json", "r").read())
self.assertEqual(dyson_sensor_state.temperature, 0)
self.assertEqual(dyson_sensor_state.humidity, 0)
self.assertEqual(dyson_sensor_state.particulate_matter_25, 0)
self.assertEqual(dyson_sensor_state.particulate_matter_10, 0)
self.assertEqual(dyson_sensor_state.volatile_organic_compounds, 0)
self.assertEqual(dyson_sensor_state.volatile_organic_compounds, 0)
self.assertEqual(dyson_sensor_state.p25r, 0)
self.assertEqual(dyson_sensor_state.p10r, 0)
self.assertEqual(dyson_sensor_state.__repr__(),
"DysonEnvironmentalSensorV2State("
"temperature=0,humidity=0,particulate_matter_25=0,"
"particulate_matter_10=0,"
"volatile_organic_compounds=0,nitrogen_dioxide=0,"
"p25r=0,p10r=0,sleep_timer=0)")
def test_dyson_v2_sensor_state_init(self):
dyson_sensor_state = DysonEnvironmentalSensorV2State(
open("tests/data/sensor_pure_cool_init.json", "r").read())
self.assertEqual(dyson_sensor_state.temperature, 0)
self.assertEqual(dyson_sensor_state.humidity, 0)
self.assertEqual(dyson_sensor_state.particulate_matter_25, 0)
self.assertEqual(dyson_sensor_state.particulate_matter_10, 0)
self.assertEqual(dyson_sensor_state.volatile_organic_compounds, 0)
self.assertEqual(dyson_sensor_state.volatile_organic_compounds, 0)
self.assertEqual(dyson_sensor_state.p25r, 0)
self.assertEqual(dyson_sensor_state.p10r, 0)
self.assertEqual(dyson_sensor_state.__repr__(),
"DysonEnvironmentalSensorV2State("
"temperature=0,humidity=0,particulate_matter_25=0,"
"particulate_matter_10=0,"
"volatile_organic_compounds=0,nitrogen_dioxide=0,"
"p25r=0,p10r=0,sleep_timer=0)")
def test_on_state_v2_message(self):
def on_message(msg):
assert isinstance(msg, DysonPureCoolV2State)
self._device.add_message_listener(on_message)
msg = Mock()
payload = open("tests/data/state_pure_cool.json", "r").read()
msg.payload = Mock()
msg.payload.decode.return_value = payload
DysonPureCool.on_message(None, self._device, msg)
def test_on_sensor_v2_message(self):
def on_message(msg):
assert isinstance(msg, DysonEnvironmentalSensorV2State)
self._device.add_message_listener(on_message)
msg = Mock()
payload = open("tests/data/sensor_pure_cool.json", "r").read()
msg.payload = Mock()
msg.payload.decode.return_value = payload
DysonPureCool.on_message(None, self._device, msg)
| 41.276878
| 79
| 0.63735
| 3,212
| 28,027
| 5.299813
| 0.066002
| 0.077131
| 0.049932
| 0.03695
| 0.872114
| 0.862598
| 0.830876
| 0.804324
| 0.767021
| 0.757505
| 0
| 0.023119
| 0.236058
| 28,027
| 678
| 80
| 41.337758
| 0.77194
| 0
| 0
| 0.655357
| 0
| 0
| 0.164127
| 0.111749
| 0
| 0
| 0
| 0
| 0.45
| 1
| 0.083929
| false
| 0.001786
| 0.014286
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
768befde4124da5ce2fab4c764c0a73fd04af80e
| 590
|
py
|
Python
|
torchvision/io/__init__.py
|
liyichao/vision
|
53b062ca58932bbf387b96f2dd3397c4495b735b
|
[
"BSD-3-Clause"
] | 1
|
2019-10-22T04:37:14.000Z
|
2019-10-22T04:37:14.000Z
|
torchvision/io/__init__.py
|
liyichao/vision
|
53b062ca58932bbf387b96f2dd3397c4495b735b
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/io/__init__.py
|
liyichao/vision
|
53b062ca58932bbf387b96f2dd3397c4495b735b
|
[
"BSD-3-Clause"
] | 1
|
2019-10-24T01:00:26.000Z
|
2019-10-24T01:00:26.000Z
|
from .video import write_video, read_video, read_video_timestamps
from ._video_opt import (
_read_video_from_file,
_read_video_timestamps_from_file,
_probe_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_memory,
_probe_video_from_memory,
_HAS_VIDEO_OPT,
)
__all__ = [
'write_video', 'read_video', 'read_video_timestamps',
'_read_video_from_file', '_read_video_timestamps_from_file', '_probe_video_from_file',
'_read_video_from_memory', '_read_video_timestamps_from_memory', '_probe_video_from_memory',
'_HAS_VIDEO_OPT',
]
| 31.052632
| 96
| 0.789831
| 80
| 590
| 4.925
| 0.15
| 0.274112
| 0.28934
| 0.291878
| 0.908629
| 0.908629
| 0.908629
| 0.715736
| 0.715736
| 0.715736
| 0
| 0
| 0.132203
| 590
| 18
| 97
| 32.777778
| 0.769531
| 0
| 0
| 0
| 0
| 0
| 0.359322
| 0.3
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
76905ba57fc7f966974d8b0c6948e4cd7b2b4e47
| 206
|
py
|
Python
|
tests/test_misc.py
|
poudel/dj-username-tools
|
bf5bfd3c2e92e3b7ea8433258b3966805766786c
|
[
"MIT"
] | 5
|
2018-02-24T02:44:51.000Z
|
2022-03-19T10:17:25.000Z
|
tests/test_misc.py
|
poudel/dj-username-tools
|
bf5bfd3c2e92e3b7ea8433258b3966805766786c
|
[
"MIT"
] | 4
|
2018-02-23T19:16:31.000Z
|
2018-02-24T01:36:02.000Z
|
tests/test_misc.py
|
poudel/dj-username-tools
|
bf5bfd3c2e92e3b7ea8433258b3966805766786c
|
[
"MIT"
] | null | null | null |
from django.test import TestCase # noqa
from username_tools.admin import UsernameBlacklistAdmin # noqa
from username_tools.apps import UsernameToolsConfig # noqa
# this is just here to satisfy coverage
| 34.333333
| 63
| 0.820388
| 27
| 206
| 6.185185
| 0.703704
| 0.095808
| 0.191617
| 0.251497
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145631
| 206
| 5
| 64
| 41.2
| 0.948864
| 0.252427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4f2105518d1e7dbac46e750c892e06f17a7358a3
| 3,582
|
py
|
Python
|
django_gravatar/tests.py
|
skitoo/django-gravatar
|
9ebc8880caa2346652373f34bbbd1627b3bf0f23
|
[
"MIT"
] | 4
|
2015-10-28T19:35:45.000Z
|
2019-12-10T09:44:10.000Z
|
django_gravatar/tests.py
|
skitoo/django-gravatar
|
9ebc8880caa2346652373f34bbbd1627b3bf0f23
|
[
"MIT"
] | 1
|
2020-08-19T20:49:24.000Z
|
2020-08-19T20:49:24.000Z
|
django_gravatar/tests.py
|
skitoo/django-gravatar
|
9ebc8880caa2346652373f34bbbd1627b3bf0f23
|
[
"MIT"
] | 2
|
2015-10-28T19:35:47.000Z
|
2020-05-17T21:25:34.000Z
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from .templatetags.gravatar import gravatar_url, gravatar
class TestGravatar(TestCase):
def test_gravatar_url(self):
self.assertEqual('http://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=80&d=', gravatar_url('alexis.couronne@gmail.com'))
self.assertEqual('http://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=120&d=', gravatar_url('alexis.couronne@gmail.com', 120))
def test_gavatar(self):
html = '<img src="http://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=80&d=" width="80" height="80" >'
self.assertEqual(html, gravatar('alexis.couronne@gmail.com'))
html = '<img src="http://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=130&d=" width="130" height="130" >'
self.assertEqual(html, gravatar('alexis.couronne@gmail.com', 130))
html = '<img src="http://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=180&d=" width="180" height="180" class="avatar" >'
self.assertEqual(html, gravatar('alexis.couronne@gmail.com', 180, 'class="avatar"'))
class TestGravatarWithSecureActivated(TestCase):
def test_gravatar_url(self):
with self.settings(GRAVATAR_SECURE=True):
self.assertEqual('https://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=80&d=', gravatar_url('alexis.couronne@gmail.com'))
self.assertEqual('https://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=120&d=', gravatar_url('alexis.couronne@gmail.com', 120))
def test_gavatar(self):
with self.settings(GRAVATAR_SECURE=True):
html = '<img src="https://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=80&d=" width="80" height="80" >'
self.assertEqual(html, gravatar('alexis.couronne@gmail.com'))
html = '<img src="https://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=130&d=" width="130" height="130" >'
self.assertEqual(html, gravatar('alexis.couronne@gmail.com', 130))
html = '<img src="https://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=180&d=" width="180" height="180" class="avatar" >'
self.assertEqual(html, gravatar('alexis.couronne@gmail.com', 180, 'class="avatar"'))
class TestGravatarWithDefaultGravatarUrl(TestCase):
def test_gravatar_url(self):
with self.settings(GRAVATAR_DEFAULT_URL='www.foo.com'):
self.assertEqual('http://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=80&d=www.foo.com', gravatar_url('alexis.couronne@gmail.com'))
self.assertEqual('http://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=120&d=www.foo.com', gravatar_url('alexis.couronne@gmail.com', 120))
def test_gavatar(self):
with self.settings(GRAVATAR_DEFAULT_URL='www.foo.com'):
html = '<img src="http://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=80&d=www.foo.com" width="80" height="80" >'
self.assertEqual(html, gravatar('alexis.couronne@gmail.com'))
html = '<img src="http://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=130&d=www.foo.com" width="130" height="130" >'
self.assertEqual(html, gravatar('alexis.couronne@gmail.com', 130))
html = '<img src="http://www.gravatar.com/avatar/d7935ea08d17e261d3b0d12c04759c9d?s=180&d=www.foo.com" width="180" height="180" class="avatar" >'
self.assertEqual(html, gravatar('alexis.couronne@gmail.com', 180, 'class="avatar"'))
| 61.758621
| 163
| 0.70268
| 435
| 3,582
| 5.735632
| 0.103448
| 0.09018
| 0.084168
| 0.12024
| 0.932265
| 0.92505
| 0.913026
| 0.90501
| 0.90501
| 0.89499
| 0
| 0.138754
| 0.134841
| 3,582
| 57
| 164
| 62.842105
| 0.666344
| 0.005863
| 0
| 0.487179
| 0
| 0.282051
| 0.550871
| 0.105396
| 0
| 0
| 0
| 0
| 0.384615
| 1
| 0.153846
| false
| 0
| 0.051282
| 0
| 0.282051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f62aadfeb7271b1d0c2f69d286f576b6e6cc6f5
| 6,703
|
py
|
Python
|
Python/windwardrestapi/Model/SalesforceOAuthDataSource.py
|
windward-studios/Windward-REST-version-2-Clients
|
8fd467e6f4ece6fcc435609ffb23448d07af3131
|
[
"MIT"
] | null | null | null |
Python/windwardrestapi/Model/SalesforceOAuthDataSource.py
|
windward-studios/Windward-REST-version-2-Clients
|
8fd467e6f4ece6fcc435609ffb23448d07af3131
|
[
"MIT"
] | 1
|
2020-10-12T20:32:05.000Z
|
2020-10-12T20:38:04.000Z
|
Python/windwardrestapi/Model/SalesforceOAuthDataSource.py
|
windward-studios/Windward-REST-version-2-Clients
|
8fd467e6f4ece6fcc435609ffb23448d07af3131
|
[
"MIT"
] | null | null | null |
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x08\x00\x55\x0d\x0d\x0a\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\x42\x06\x00\x00\x00\x00\x00\x10\xcd\x19\x3d\xba\xe7\xd5\x70\xfa\x2e\x4c\xcf\xfe\x57\x12\x97\x6b\x00\x00\x00\x00\x00\x00\x00\x00\x74\xf3\x46\x4a\x24\x21\x53\xd4\x04\xa7\x33\x0a\xe9\x9a\x60\x86\xbb\x18\xd4\x46\xed\xd4\x45\x12\x57\x6f\x31\x4b\x1f\xc3\xc4\x91\x00\xde\xff\x9f\x02\xa4\x30\x9e\x56\xac\x4b\xe0\x9d\xe6\xdf\x6f\xed\x37\x76\xf7\x8e\x0a\x1e\x5f\x84\x79\xe1\xbe\xc8\x7e\x16\xf6\x2f\xed\x7e\xfa\x37\xfa\x58\xb4\xe5\xf3\xac\x7a\x68\x17\xaf\x59\x59\xe4\xd4\x3b\x29\xee\xdb\x70\x71\x56\xfa\xd4\x0e\xd0\x2d\xf7\xf9\x53\x97\x3f\x0f\x14\x8f\xc4\x68\x0d\x25\xf1\xeb\x62\x3d\xf8\xda\xf6\x09\x97\x93\xe6\xd0\x2f\xdf\xb9\x66\x1a\x1f\x0d\x2c\x57\x9e\x8c\x9a\x23\x31\xc7\x3c\x15\xa6\x32\x34\x7e\xcb\x60\xe9\x9f\x07\x70\x04\x72\x38\xbe\x15\x84\xf0\x04\xa4\xfe\xd4\xee\xcc\x94\x74\xb8\x4d\x6d\xcd\xa1\xdd\xd4\x9e\x44\x46\xf3\x93\xb7\xe6\xd2\xa1\x3b\x5c\x19\x5a\x90\xd5\x44\x43\xdf\x94\x2a\x5d\xa6\xb8\x43\x2c\x89\xbe\x87\xf5\xa5\x60\x70\x8a\xf4\x21\xd2\x45\x1a\x46\xfa\xa6\x24\xa7\x7f\x5b\x6a\xc4\xc0\x5f\xe3\x0d\xaf\xc9\xf6\xdc\x9b\x28\xd7\x1d\x34\xa3\x9d\x9b\xba\xcb\x63\x34\x76\xa9\x98\x9c\x36\xd5\x06\x3f\xd3\x21\x38\xf5\x4f\xc6\x98\xc9\xc0\xba\xcc\xa5\xb4\x53\x3f\xd9\xdb\x4a\x98\x37\x27\x28\xa0\x45\x1e\x28\xaa\x53\xc6\x7e\x71\x28\xd9\x38\x0d\xe3\x9f\x76\x65\xc8\xb9\xee\xcc\xec\x93\xf1\x90\x24\xce\xfb\x9d\xa8\x13\x4d\x73\x24\x98\xae\x05\xc3\x5d\x4e\x4a\x73\x7a\xcd\xc0\xae\x06\x36\x11\x24\x39\x98\xeb\x37\x41\x5f\x58\x5f\x8b\x5d\xfe\x9f\x10\x3c\xc0\x59\x65\x6f\x16\x4d\x53\x50\x8e\x88\x7a\x8e\xf5\xb8\xb0\xbd\x53\x4c\x74\x0f\xbe\x1a\x86\x8b\xfa\x94\xa4\x98\xea\x85\x73\x01\x6e\x4a\x38\xe3\xf1\x95\x3f\xdf\x93\xf1\x40\xef\xc7\x44\xa8\xd5\x87\xf4\xc5\x49\xd1\xa8\x0d\x31\x3a\xe1\xb0\xf9\xf1\x7c\x7a\xfa\x52\xfd\x08\x15\xfe\xbb\x19\xba\x65\x50\xfd\xa8\x5b\xc7\x62\x35\x90\xb9\x55\x0c\x46\x9c\x74\xde\xda\x2c\xb1\xbb\xca\xc1\x25\xd9\xc2\xc0\xcf\xd3\x15\x7b\x00\xbc\x29\x21\x6f\x15\xf9\x53\x47\xd5\x31\xf5\x25\xba\xa7\xb6\xf5\xa9\xe5\xa5\x55\x01\x57\x81\xbb\x7f\x68\xef\x6d\xbe\x98\x03\x66\x88\x96\xc6\x57\x4c\x6f\x78\xc7\x37\xa1\x00\x76\x07\xcb\xf4\xa0\x92\xbe\x62\x02\x31\x68\x0e\x73\xb2\xea\x20\xcc\xc5\x6a\x53\xdb\x4c\x2e\xf1\x67\xf8\xd4\xba\x78\x75\xf9\x7e\xbd\x3d\x7b\x9a\xf5\x65\x12\x41\xbf\x58\x96\x35\x6d\x2c\x9e\xda\xaa\x64\x5b\x0c\x51\x4a\xcf\xf4\x6a\xd4\x3a\xff\x89\x48\x70\xe8\x9a\xda\xb5\x63\xc8\x12\xed\x96\x10\x51\xee\xa6\x1c\xac\x00\x01\x48\x80\x3b\xdc\x07\xac\xd7\x20\x91\x34\x84\xa0\x3a\xdb\x78\xcf\x9b\xf6\xbb\xca\x1b\xd8\xde\x15\x2d\x7b\x06\x58\x1f\x0b\xae\xd9\xbf\x42\xb4\x22\xa1\xad\xc2\xff\xfa\x72\x10\x49\x75\x7b\x82\x83\xf2\x56\x39\x2d\xd7\x0f\x63\x82\xf7\xc4\x15\xad\xbf\x57\x2c\x88\x5d\xc9\x7c\x5f\xec\xb6\x8f\xf2\x90\x2b\x2f\xe9\x2a\x03\xac\x08\x1f\xd0\x77\xf8\x41\xb6\xdd\x50\x0b\xc6\xf7\x91\xb2\xd8\x01\x5d\xb5\x59\x94\x0f\x86\xbc\x3b\xa1\xc3\x9b\x21\x52\x78\x31\x59\xba\x16\xc2\x48\x70\x46\x17\x20\x05\x8a\x5f\x77\x9d\x8c\x52\x91\x8d\xff\x3a\x1e\x0a\x00\x26\x0f\xfc\xf1\x98\x90\x57\xa2\x18\x4c\x68\x64\x97\x12\x9e\x9f\xa7\xc3\x18\x37\x13\x6d\x84\x5d\x2c\x4d\x89\x6e\xf5\x81\x96\x59\x00\x6a\xdd\x72\x2a\xa8\x62\xef\xb5\x00\xd2\x87\x06\x4b\x77\xb2\xc6\xb5\xbc\x56\xa4\xc3\xbc\x3b\xe1\xef\x27\xba\xbf\xa7\xf5\x5f\xab\x7a\x53\x38\xa5\xcc\xa4\xbf\x0b\xa4\xbd\xbf\x1a\x80\x9a\x08\x63\x31\xc9\xb9\xa7\xda\xf2\x8f\x3b\x23\xcd\xca\xc9\x9a\xb4\xce\xaa\x32\x3e\x1d\xd7\x1d\xa4\x4c\xbd\x2b\xca\xdf\xd4\xc5\xa5\x64\xff\x94\x4b\x77\x00\x3b\x5b\x5c\x12\xb3\x83\x79\xab\xb7\xe6\x40\x3d\xa7\x64\x06\xef\xab\x71\xd3\xde\xd9\x12\x74\xfb\x8f\x2f\xe3\x41\x9f\x3c\x2d\x09\x5b\xda\xf3\x68\x65\x50\x7d\x75\x1c\x1b\x2f\x87\xf6\xc7\xed\x7a\x16\x7a\x74\x10\x38\x44\x1f\x00\x4d\x09\x69\xad\x78\x1b\x4e\x1a\x85\x3d\x91\x10\xe4\x77\x91\x69\x95\x01\x87\x3d\xde\x24\xee\x8e\xe7\x67\x1f\x62\xc2\xb9\x09\x1f\xee\x0e\xe3\x35\x3c\x90\xfb\xf9\x7e\x17\x13\x82\x85\xbc\x3b\xf0\x96\x7d\x23\x84\x07\xd5\x30\xb7\x48\x88\x03\x67\x50\xd1\x8a\x12\x1b\x6f\xf2\xd5\x8a\xb4\x5e\x4e\xf5\xe5\x94\x53\x45\x51\x6d\x33\x87\xa6\xfa\xba\x69\x39\x37\x5c\x34\xb3\xef\x04\x2b\x25\xf8\xfa\x90\x6a\x6c\x75\x8b\x07\x30\x78\x0d\x26\x82\x0b\x18\x40\xcb\xdd\x08\x90\x5f\x5b\x16\x60\xc4\x23\x1e\x24\x95\x14\xb9\x6d\x4b\x11\xaf\x1b\x61\xca\xb7\x28\x12\xaf\x1b\xd1\x25\xfc\x1b\x80\xa0\x04\x29\xe0\x73\xd6\x5d\xbc\x37\xc7\xb2\x4d\xb8\x46\x25\x84\x80\x5b\x11\xd5\x76\x13\x52\x76\x16\x1d\xc3\xe0\xf9\x3e\x51\xb0\xfd\x8f\x71\xb9\x48\xfa\xc1\xce\xf6\x50\x90\x9a\x5c\x37\x2b\xbc\xa2\xae\xff\xc8\x28\x21\x17\xb4\xa8\xc1\x4c\x09\xf9\xae\xd0\xb9\xba\x5a\x07\xd7\x37\x06\xff\x7d\x4e\xe5\x27\x29\xfd\xd0\xdc\x87\x0b\xab\x7f\x4c\xca\x59\x93\x2a\xcc\xf6\xef\xb0\xc6\x8d\x6f\x96\x0c\x5b\x13\x87\x54\x8e\x8a\xe7\xbd\x25\x9b\xf5\x68\xd5\x7e\x55\x1b\xa4\x44\xdb\x17\xfb\x0b\x44\x55\xca\x70\x3d\xa4\x82\x78\xdb\x43\xc7\xf8\x2e\x7c\x86\xc1\x41\x2d\xbf\x4f\x6d\xfb\xd3\x83\x07\xb9\xa5\xc2\x62\x61\xa4\x0d\xc5\x01\x0f\xcd\x3d\xec\xb9\x5c\xce\x78\x92\x53\x99\x76\x90\x2f\xf7\x43\x3f\x9f\x80\xc9\x35\xa2\x13\xd6\xae\xca\x76\x66\x3d\xac\x98\x41\x18\x8a\xc9\x85\xf6\x3c\xa0\x12\xee\x10\x6a\xd8\x2b\xf7\x0d\x60\xa4\x13\x2b\x44\xbf\x4f\xb0\x91\x7c\x09\x7d\x7f\x45\xf9\xdc\x99\xfc\xe0\xd8\xd5\x61\xf8\x47\x2e\x7c\x1b\x32\x76\x79\x14\x4c\xb8\x23\x01\xd3\x82\x74\x53\xaf\xc3\xeb\x4a\x78\x52\x19\x14\xa0\xc9\x1a\x20\x69\x5e\x6c\xaf\xd2\x00\xce\x3c\x57\xfb\xb9\x99\x83\x28\x75\x7f\x9a\x77\x37\x31\xa3\x47\x1d\xb1\x7e\xf9\xb6\x87\x21\xc6\x85\x5f\xcc\x5a\xc4\x0f\xcc\x79\xf2\xbc\x7e\xf9\x46\x84\xa4\x78\x87\x72\xdb\x14\x98\x1e\x31\x67\x86\x8a\xee\xd8\x20\xf2\x82\xcc\x52\x1b\xd5\xa7\x9a\x94\x74\x09\x0d\xd2\x43\x9e\xe9\x7b\xa3\xa6\x07\x4d\xae\x1e\xc5\xdc\xe3\x6a\x0e\x61\x81\x2d\x1b\x3a\x29\x1d\x8e\xa1\x51\xdc\xec\xcf\xcb\x43\x45\x61\xa1\x23\x19\xee\x2c\x3b\x29\x21\xd9\xbf\xe4\xdb\x0a\xe4\x83\x86\x95\x5c\x96\xf1\xf0\xb1\xdd\x1d\xd9\xb8\xc7\xcb\x27\x78\x36\x5f\xad\xe1\x13\xf0\xa3\x08\x32\xe8\x8b\x18\x98\xe9\xd8\xce\x41\x92\xf4\x1c\x90\x74\x6f\x89\x49\x2a\xf9\x1a\x2f\xb5\x63\xe2\x3b\xa8\xb9\xb5\x67\x20\xd1\x8f\x85\xa0\xe3\xdb\x35\x62\xb7\x81\x6b\x7f\x1b\x33\x4e\x35\xf5\x95\xd8\xa0\x91\x34\x4e\xbf\x20\x93\x90\x11\x01\x66\xfb\x12\xb7\xf2\x78\x85\x8f\x6d\x18\x44\xa1\xf8\x78\x8f\x6b\x7b\xc3\x37\x26\x94\xe5\x41\xb1\xec\x14\x01\x00\x35\xba\xe2\x81\xf0\xff\xc0\x69\xae\xe2\x26\x1d\x36\x96\xe2\x7f\xec\x5b\x2f\x01\x07\xc3\x37\x18\xc6\xdb\xeb\xa1\x40\xe3\xa0\x2b\xf4\x56\x6d\x16\x9b\xde\x63\xdf\xef\xa0\x0f\x9a\xf4\xc8\x93\x45\x77\x9e\xf8\xd7\x16\x6b\x26\xc2\x4f\xc5\x5b\x20\x2d\x0b\x17\xc7\x6d\xad\x21\xc3\x2d\xa4\xd7\xac\xf7\x30\xc5\x7e\x65\xa4\xf7\xad\x56\xac\x3e\x57\x37\x82\x08\xe4\x6f\x4b\xe9\xe8\x5e\x4f\xfb\x2c\x73\xe1\x80\xd8\x41\xcd\xd3\xe1\xd8\xa0\x31\x99\x67\x43\x8e\x76\x36\x16\xbe\x32\x5b\xa6\xd8', 2)
| 6,703
| 6,703
| 0.749963
| 1,671
| 6,703
| 3.001197
| 0.156194
| 0.026321
| 0.028714
| 0.026321
| 0.011964
| 0.008973
| 0.008973
| 0
| 0
| 0
| 0
| 0.313881
| 0.000448
| 6,703
| 1
| 6,703
| 6,703
| 0.434627
| 0
| 0
| 0
| 0
| 1
| 0.994033
| 0.994033
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
4f6f5e6db0cd1696a05a019bb203c94beba67cbc
| 51,454
|
py
|
Python
|
monasca_tempest_tests/tests/api/test_notification_methods.py
|
guilhermesteinmuller/monasca-tempest-plugin
|
e6eb044ba96164f8f089036291d5331382e33ccd
|
[
"Apache-2.0"
] | null | null | null |
monasca_tempest_tests/tests/api/test_notification_methods.py
|
guilhermesteinmuller/monasca-tempest-plugin
|
e6eb044ba96164f8f089036291d5331382e33ccd
|
[
"Apache-2.0"
] | null | null | null |
monasca_tempest_tests/tests/api/test_notification_methods.py
|
guilhermesteinmuller/monasca-tempest-plugin
|
e6eb044ba96164f8f089036291d5331382e33ccd
|
[
"Apache-2.0"
] | null | null | null |
# (C) Copyright 2015-2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import six.moves.urllib.parse as urlparse
from monasca_tempest_tests.tests.api import base
from monasca_tempest_tests.tests.api import constants
from monasca_tempest_tests.tests.api import helpers
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
DEFAULT_EMAIL_ADDRESS = 'john.doe@domain.com'
class TestNotificationMethods(base.BaseMonascaTest):
@classmethod
def resource_setup(cls):
super(TestNotificationMethods, cls).resource_setup()
@classmethod
def resource_cleanup(cls):
super(TestNotificationMethods, cls).resource_cleanup()
@decorators.attr(type="gate")
def test_create_notification_method(self):
notification = helpers.create_notification()
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_create_email_notification_method_with_lower_case_type(self):
notification = helpers.create_notification(name='lower case email notification',
type='email')
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_create_email_notification_method_with_mixed_case_type(self):
notification = helpers.create_notification(name='mixed case email notification',
type='EmAil')
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_create_notification_method_period_not_defined(self):
notification = helpers.create_notification(period=None)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_create_webhook_notification_method_with_non_zero_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name,
type='WEBHOOK',
address='http://localhost/test01',
period=60)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_create_notification_method_webhook_test_tld(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name,
type='WEBHOOK',
address='http://mytest.test/webhook',
period=60)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_create_notification_method_webhook_test_tld_and_port(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name,
type='WEBHOOK',
address='http://mytest.test:4533/webhook',
period=60)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_no_name(self):
notification = helpers.create_notification(name=None)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_no_type(self):
notification = helpers.create_notification(type=None)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_no_address(self):
notification = helpers.create_notification(address=None)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_name_exceeds_max_length(self):
long_name = "x" * (constants.MAX_NOTIFICATION_METHOD_NAME_LENGTH + 1)
notification = helpers.create_notification(name=long_name)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_address_exceeds_max_length(self):
long_address = "x" * (
constants.MAX_NOTIFICATION_METHOD_ADDRESS_LENGTH + 1)
notification = helpers.create_notification(address=long_address)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_invalid_email_address(self):
notification = helpers.create_notification(address="name@")
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_invalid_scheme_webhook(self):
notification = helpers.create_notification(type="WEBHOOK",
address="ftp://localhost")
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_invalid_webhook_address(self):
notification = helpers.create_notification(type="WEBHOOK",
address="localhost:123")
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
# The below tests are making sure that we accept passing in case insensitive types
# and that we still validate the
# address if the types are case insensitive
@decorators.attr(type="gate")
def test_create_notification_method_webhook_with_lower_case_type(self):
notification = helpers.create_notification(type='webhook',
address='http://mytest.test:4533')
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_create_notification_method_webhook_with_mixed_case_type(self):
notification = helpers.create_notification(type='webHooK',
address='http://mytest.test:4533')
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_invalid_email_address_type_all_lower_case(self):
notification = helpers.create_notification(type="email",
address="name@")
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_invalid_email_address_type_all_mixed_case(self):
notification = helpers.create_notification(type="EmAil",
address="name@")
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_invalid_webhook_address_type_mixed_case(self):
notification = helpers.create_notification(type="WebHook",
address="localhost:123")
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_invalid_webhook_address_type_lower_case(self):
notification = helpers.create_notification(type="webhook",
address="localhost:123")
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_invalid_type(self):
notification = helpers.create_notification(type='random')
self.assertRaises(
(exceptions.BadRequest, exceptions.NotFound, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_invalid_float_period(self):
notification = helpers.create_notification(period=1.2)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_notification_method_with_invalid_string_period(self):
notification = helpers.create_notification(period='random')
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_email_notification_method_with_invalid_non_zero_period(self):
notification = helpers.create_notification(period=60)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_pagerduty_notification_method_with_invalid_non_zero_period(self):
notification = helpers.create_notification(type='PAGERDUTY',
address='test03@localhost',
period=60)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_create_webhook_notification_method_with_invalid_period(self):
notification = helpers.create_notification(type='WEBHOOK',
address='http://localhost/test01',
period=10)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.create_notifications,
notification)
@decorators.attr(type="gate")
def test_list_notification_methods(self):
notification = helpers.create_notification()
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.list_notification_methods()
self.assertEqual(200, resp.status)
# Test response body
self.assertTrue(set(['links', 'elements']) == set(response_body))
elements = response_body['elements']
element = elements[0]
self.assertTrue(set(['id', 'links', 'name', 'type', 'address', 'period']) ==
set(element))
self.assertTrue(type(element['id']) is unicode)
self.assertTrue(type(element['links']) is list)
self.assertTrue(type(element['name']) is unicode)
self.assertTrue(type(element['type']) is unicode)
self.assertTrue(type(element['address']) is unicode)
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_list_notification_methods_sort_by(self):
notifications = [helpers.create_notification(
name='notification sort by 01',
type='PAGERDUTY',
address='test03@localhost',
), helpers.create_notification(
name='notification sort by 02',
type='WEBHOOK',
address='http://localhost/test01',
), helpers.create_notification(
name='notification sort by 03',
type='EMAIL',
address='test02@localhost',
)]
for notification in notifications:
resp, response_body = self.monasca_client.create_notifications(notification)
notification['id'] = response_body['id']
time.sleep(1)
sort_params1 = ['id', 'name', 'type', 'address']
for sort_by in sort_params1:
notif_sorted_by = sorted(notifications,
key=lambda obj: obj[sort_by])
resp, response_body = self.monasca_client.list_notification_methods(
'?sort_by=' + sort_by)
self.assertEqual(200, resp.status)
for i, element in enumerate(response_body['elements']):
self.assertEqual(notif_sorted_by[i][sort_by], element[sort_by])
resp, response_body = self.monasca_client.list_notification_methods(
'?sort_by=' + sort_by + urlparse.quote(' asc'))
self.assertEqual(200, resp.status)
for i, element in enumerate(response_body['elements']):
self.assertEqual(notif_sorted_by[i][sort_by], element[sort_by])
notif_sorted_by_reverse = sorted(notifications,
key=lambda obj: obj[sort_by],
reverse=True)
resp, response_body = self.monasca_client.list_notification_methods(
'?sort_by=' + sort_by + urlparse.quote(' desc'))
self.assertEqual(200, resp.status)
for i, element in enumerate(response_body['elements']):
self.assertEqual(notif_sorted_by_reverse[i][sort_by], element[sort_by])
sort_params2 = ['created_at', 'updated_at']
for sort_by in sort_params2:
resp, response_body = self.monasca_client.list_notification_methods(
'?sort_by=' + sort_by)
self.assertEqual(200, resp.status)
for i, element in enumerate(response_body['elements']):
self.assertEqual(notifications[i]['id'], element['id'])
resp, response_body = self.monasca_client.list_notification_methods(
'?sort_by=' + sort_by + urlparse.quote(' asc'))
self.assertEqual(200, resp.status)
for i, element in enumerate(response_body['elements']):
self.assertEqual(notifications[i]['id'], element['id'])
resp, response_body = self.monasca_client.list_notification_methods(
'?sort_by=' + sort_by + urlparse.quote(' desc'))
self.assertEqual(200, resp.status)
for i, element in enumerate(response_body['elements']):
self.assertEqual(notifications[-i - 1]['id'], element['id'])
for notification in notifications:
self.monasca_client.delete_notification_method(notification['id'])
@decorators.attr(type="gate")
def test_list_notification_methods_multiple_sort_by(self):
notifications = [helpers.create_notification(
name='notification sort by 01',
type='EMAIL',
address='test02@localhost',
), helpers.create_notification(
name='notification sort by 02',
type='PAGERDUTY',
address='test03@localhost',
), helpers.create_notification(
name='notification sort by 03',
type='EMAIL',
address='test04@localhost',
), helpers.create_notification(
name='notification sort by 04',
type='EMAIL',
address='test01@localhost',
)]
for notification in notifications:
resp, response_body = self.monasca_client.create_notifications(notification)
notification['id'] = response_body['id']
resp, response_body = self.monasca_client.list_notification_methods(
'?sort_by=' + urlparse.quote('type asc,address desc,id'))
self.assertEqual(200, resp.status)
expected_order = [2, 0, 3, 1]
for i, element in enumerate(response_body['elements']):
self.assertEqual(notifications[expected_order[i]]['id'], element['id'])
for element in response_body['elements']:
self.monasca_client.delete_notification_method(element['id'])
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_list_notification_methods_invalid_sort_by(self):
query_parms = '?sort_by=random'
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.list_notification_methods,
query_parms)
@decorators.attr(type="gate")
def test_list_notification_methods_with_offset_limit(self):
name1 = data_utils.rand_name('notification')
name2 = data_utils.rand_name('notification')
name3 = data_utils.rand_name('notification')
name4 = data_utils.rand_name('notification')
notification1 = helpers.create_notification(name=name1)
notification2 = helpers.create_notification(name=name2)
notification3 = helpers.create_notification(name=name3)
notification4 = helpers.create_notification(name=name4)
resp, response_body = self.monasca_client.create_notifications(
notification1)
id1 = response_body['id']
self.assertEqual(201, resp.status)
resp, response_body = self.monasca_client.create_notifications(
notification2)
id2 = response_body['id']
self.assertEqual(201, resp.status)
resp, response_body = self.monasca_client.create_notifications(
notification3)
id3 = response_body['id']
self.assertEqual(201, resp.status)
resp, response_body = self.monasca_client.create_notifications(
notification4)
id4 = response_body['id']
self.assertEqual(201, resp.status)
resp, response_body = self.monasca_client.list_notification_methods()
elements = response_body['elements']
first_element = elements[0]
last_element = elements[3]
query_parms = '?limit=4'
resp, response_body = self.monasca_client.\
list_notification_methods(query_parms)
self.assertEqual(200, resp.status)
self.assertEqual(4, len(response_body['elements']))
self.assertEqual(first_element, response_body['elements'][0])
timeout = time.time() + 60 * 1 # 1 minute timeout
for limit in range(1, 5):
next_element = elements[limit - 1]
offset = limit
while True:
if time.time() < timeout:
query_parms = '?offset=' + str(offset) + \
'&limit=' + str(limit)
resp, response_body = self.monasca_client.\
list_notification_methods(query_parms)
self.assertEqual(200, resp.status)
new_elements = response_body['elements']
if len(new_elements) > limit - 1:
self.assertEqual(limit, len(new_elements))
next_element = new_elements[limit - 1]
offset += 1
elif 0 < len(new_elements) <= limit - 1:
self.assertEqual(last_element, new_elements[0])
break
else:
self.assertEqual(last_element, next_element)
break
else:
msg = "Failed " \
"test_list_notification_methods_with_offset_limit:" \
" one minute timeout on offset limit test loop."
raise exceptions.TimeoutException(msg)
resp, response_body = self.monasca_client.\
delete_notification_method(id1)
self.assertEqual(204, resp.status)
resp, response_body = self.monasca_client.\
delete_notification_method(id2)
self.assertEqual(204, resp.status)
resp, response_body = self.monasca_client.\
delete_notification_method(id3)
self.assertEqual(204, resp.status)
resp, response_body = self.monasca_client.\
delete_notification_method(id4)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_get_notification_method(self):
notification = helpers.create_notification()
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.get_notification_method(id)
self.assertEqual(200, resp.status)
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_get_notification_method_with_invalid_id(self):
notification = helpers.create_notification()
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = data_utils.rand_name()
self.assertRaises(exceptions.NotFound,
self.monasca_client.get_notification_method,
id)
resp, response_body = self.monasca_client.\
delete_notification_method(response_body['id'])
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_update_notification_method_name(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
self.assertEqual(name, response_body['name'])
id = response_body['id']
new_name = name + 'update'
resp, response_body = self.monasca_client.\
update_notification_method(id, new_name,
type=response_body['type'],
address=response_body['address'],
period=response_body['period'])
self.assertEqual(200, resp.status)
self.assertEqual(new_name, response_body['name'])
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_update_notification_method_type(self):
type = 'EMAIL'
notification = helpers.create_notification(type=type)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
self.assertEqual(type, response_body['type'])
id = response_body['id']
new_type = 'PAGERDUTY'
resp, response_body = \
self.monasca_client.\
update_notification_method(id, name=response_body['name'],
type=new_type,
address=response_body['address'],
period=response_body['period'])
self.assertEqual(200, resp.status)
self.assertEqual(new_type, response_body['type'])
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_update_notification_method_address(self):
address = DEFAULT_EMAIL_ADDRESS
notification = helpers.create_notification(address=address)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
self.assertEqual(address, response_body['address'])
id = response_body['id']
new_address = 'jane.doe@domain.com'
resp, response_body = self.monasca_client.\
update_notification_method(id,
name=response_body['name'],
type=response_body['type'],
address=new_address,
period=0)
self.assertEqual(200, resp.status)
self.assertEqual(new_address, response_body['address'])
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_update_notification_method_name_exceeds_max_length(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
new_name_long = "x" * (constants.MAX_NOTIFICATION_METHOD_NAME_LENGTH
+ 1)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.update_notification_method, id,
name=new_name_long, type=response_body['type'],
address=response_body['address'], period=response_body['period'])
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_update_notification_method_invalid_type(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises(
(exceptions.BadRequest, exceptions.NotFound, exceptions.UnprocessableEntity),
self.monasca_client.update_notification_method,
id, name=response_body['name'], type='random',
address=response_body['address'], period=0)
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_update_notification_method_address_exceeds_max_length(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
new_address_long = "x" * (
constants.MAX_NOTIFICATION_METHOD_ADDRESS_LENGTH + 1)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.update_notification_method, id,
name=response_body['name'], type=response_body['type'],
address=new_address_long, period=response_body['period'])
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_update_notification_method_with_no_address(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises(
(exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.update_notification_method_with_no_address, id,
name="test_update_notification_method_name",
type=response_body['type'])
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_create_and_delete_notification_method(self):
notification = helpers.create_notification()
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = response_body['id']
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_delete_notification_method_with_invalid_id(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
id = data_utils.rand_name()
self.assertRaises(exceptions.NotFound,
self.monasca_client.delete_notification_method,
id)
resp, response_body = self.monasca_client.\
delete_notification_method(response_body['id'])
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_patch_notification_method_with_invalid_id(self):
id = data_utils.rand_name()
name = data_utils.rand_name('notification-')
self.assertRaises(exceptions.NotFound,
self.monasca_client.patch_notification_method,
id, name)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_update_notification_method_with_invalid_id(self):
id = data_utils.rand_name()
name = data_utils.rand_name('notification-')
self.assertRaises(exceptions.NotFound,
self.monasca_client.update_notification_method, id,
name=name, type='EMAIL',
address='bob@thebridge.org', period=0)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_update_email_notification_method_with_nonzero_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.update_notification_method, id,
name=response_body['name'], type=response_body['type'],
address=response_body['address'], period=60)
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_update_webhook_notification_method_to_email_with_nonzero_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name,
type='WEBHOOK',
address='http://localhost/test01',
period=60)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.update_notification_method, id,
name=response_body['name'], type='EMAIL',
address='test@localhost', period=response_body['period'])
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_update_webhook_notification_method_to_pagerduty_with_nonzero_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name,
type='WEBHOOK',
address='http://localhost/test01',
period=60)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.update_notification_method, id,
name=response_body['name'], type='PAGERDUTY',
address='test@localhost', period=response_body['period'])
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_update_notification_method_with_non_int_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.update_notification_method, id,
name=response_body['name'], type=response_body['type'],
address=response_body['name'], period='zero')
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_update_webhook_notification_method_with_invalid_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name,
type='WEBHOOK',
address='http://localhost/test01',
period=60)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.update_notification_method, id,
name=response_body['name'], type=response_body['type'],
address=response_body['address'], period=5)
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_patch_notification_method_name(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
self.assertEqual(name, response_body['name'])
id = response_body['id']
new_name = name + 'update'
resp, response_body = self.monasca_client.\
patch_notification_method(id, new_name)
self.assertEqual(200, resp.status)
self.assertEqual(new_name, response_body['name'])
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_patch_notification_method_type(self):
type = 'EMAIL'
notification = helpers.create_notification(type=type)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
self.assertEqual(type, response_body['type'])
id = response_body['id']
new_type = 'PAGERDUTY'
resp, response_body = \
self.monasca_client.\
patch_notification_method(id, type=new_type)
self.assertEqual(200, resp.status)
self.assertEqual(new_type, response_body['type'])
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_patch_notification_method_address(self):
address = DEFAULT_EMAIL_ADDRESS
notification = helpers.create_notification(address=address)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
self.assertEqual(address, response_body['address'])
id = response_body['id']
new_address = 'jane.doe@domain.com'
resp, response_body = self.monasca_client.\
patch_notification_method(id, address=new_address)
self.assertEqual(200, resp.status)
self.assertEqual(new_address, response_body['address'])
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
def test_patch_notification_method_address_period(self):
type = 'WEBHOOK'
notification = helpers.create_notification(
type=type, address='http://localhost/test01', period=60)
resp, response_body = self.monasca_client.create_notifications(
notification)
self.assertEqual(201, resp.status)
self.assertEqual(type, response_body['type'])
id = response_body['id']
# test_patch_webhook_notification_to_email_with_zero_period
new_type = 'EMAIL'
new_period = 0
resp, response_body = \
self.monasca_client.\
patch_notification_method(id, type=new_type,
address='john.doe@domain.com',
period=new_period)
self.assertEqual(200, resp.status)
self.assertEqual(new_type, response_body['type'])
self.assertEqual(new_period, response_body['period'])
# test_patch_email_notification_to_webhook_with_nonzero_period
new_type = 'WEBHOOK'
new_period = 60
resp, response_body = \
self.monasca_client.\
patch_notification_method(id, type=new_type,
address='http://localhost/test01',
period=new_period)
self.assertEqual(200, resp.status)
self.assertEqual(new_type, response_body['type'])
self.assertEqual(new_period, response_body['period'])
resp, response_body = self.monasca_client.\
delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_patch_notification_method_name_exceeds_max_length(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
new_name_long = "x" * (constants.MAX_NOTIFICATION_METHOD_NAME_LENGTH
+ 1)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.patch_notification_method, id,
name=new_name_long)
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_patch_notification_method_invalid_type(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises(
(exceptions.BadRequest, exceptions.NotFound, exceptions.UnprocessableEntity),
self.monasca_client.patch_notification_method, id, type='random')
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_patch_notification_method_address_exceeds_max_length(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
new_address_long = "x" * (
constants.MAX_NOTIFICATION_METHOD_ADDRESS_LENGTH + 1)
self.assertRaises(
(exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.patch_notification_method, id, address=new_address_long)
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_patch_email_notification_method_with_nonzero_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.patch_notification_method, id, period=60)
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_patch_webhook_notification_method_to_email_with_nonzero_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name,
type='WEBHOOK',
address='http://localhost/test01',
period=60)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.patch_notification_method, id, type='EMAIL')
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_patch_webhook_notification_method_to_pagerduty_with_nonzero_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name,
type='WEBHOOK',
address='http://localhost/test01',
period=60)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.patch_notification_method, id, type='PAGERDUTY')
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_patch_notification_method_with_non_int_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.patch_notification_method, id, period='zero')
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
@decorators.attr(type="gate")
@decorators.attr(type=['negative'])
def test_patch_webhook_notification_method_with_invalid_period(self):
name = data_utils.rand_name('notification-')
notification = helpers.create_notification(name=name,
type='WEBHOOK',
address='http://localhost/test01',
period=60)
resp, response_body = self.monasca_client.create_notifications(
notification)
id = response_body['id']
self.assertEqual(201, resp.status)
self.assertRaises((exceptions.BadRequest, exceptions.UnprocessableEntity),
self.monasca_client.patch_notification_method, id, period=5)
resp, response_body = \
self.monasca_client.delete_notification_method(id)
self.assertEqual(204, resp.status)
| 47.59852
| 94
| 0.630622
| 5,114
| 51,454
| 6.086038
| 0.051036
| 0.081738
| 0.080838
| 0.068115
| 0.912576
| 0.893715
| 0.874084
| 0.851337
| 0.835111
| 0.820556
| 0
| 0.013149
| 0.274284
| 51,454
| 1,080
| 95
| 47.642593
| 0.820358
| 0.017433
| 0
| 0.757829
| 0
| 0
| 0.058671
| 0.001682
| 0
| 0
| 0
| 0
| 0.186848
| 1
| 0.066806
| false
| 0
| 0.008351
| 0
| 0.0762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f70ef2afb8e70bb4ea4f560e60314ff26cc3966
| 39,735
|
py
|
Python
|
databuilder/sql_parser/usage/presto/antlr_generated/SqlBaseListener.py
|
feng-tao/amundsendatabuilder
|
2c2b843ebd0ca08198e4940e668ea09e71335c12
|
[
"Apache-2.0"
] | null | null | null |
databuilder/sql_parser/usage/presto/antlr_generated/SqlBaseListener.py
|
feng-tao/amundsendatabuilder
|
2c2b843ebd0ca08198e4940e668ea09e71335c12
|
[
"Apache-2.0"
] | null | null | null |
databuilder/sql_parser/usage/presto/antlr_generated/SqlBaseListener.py
|
feng-tao/amundsendatabuilder
|
2c2b843ebd0ca08198e4940e668ea09e71335c12
|
[
"Apache-2.0"
] | 1
|
2019-09-21T23:56:41.000Z
|
2019-09-21T23:56:41.000Z
|
# Generated from SqlBase.g4 by ANTLR 4.7.1
from antlr4 import *
# This class defines a complete listener for a parse tree produced by SqlBaseParser.
class SqlBaseListener(ParseTreeListener):
# Enter a parse tree produced by SqlBaseParser#singleStatement.
def enterSingleStatement(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#singleStatement.
def exitSingleStatement(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#singleExpression.
def enterSingleExpression(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#singleExpression.
def exitSingleExpression(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#statementDefault.
def enterStatementDefault(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#statementDefault.
def exitStatementDefault(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#use.
def enterUse(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#use.
def exitUse(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#createSchema.
def enterCreateSchema(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#createSchema.
def exitCreateSchema(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#dropSchema.
def enterDropSchema(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#dropSchema.
def exitDropSchema(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#renameSchema.
def enterRenameSchema(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#renameSchema.
def exitRenameSchema(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#createTableAsSelect.
def enterCreateTableAsSelect(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#createTableAsSelect.
def exitCreateTableAsSelect(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#createTable.
def enterCreateTable(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#createTable.
def exitCreateTable(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#dropTable.
def enterDropTable(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#dropTable.
def exitDropTable(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#insertInto.
def enterInsertInto(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#insertInto.
def exitInsertInto(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#delete.
def enterDelete(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#delete.
def exitDelete(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#renameTable.
def enterRenameTable(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#renameTable.
def exitRenameTable(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#renameColumn.
def enterRenameColumn(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#renameColumn.
def exitRenameColumn(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#dropColumn.
def enterDropColumn(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#dropColumn.
def exitDropColumn(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#addColumn.
def enterAddColumn(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#addColumn.
def exitAddColumn(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#createView.
def enterCreateView(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#createView.
def exitCreateView(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#dropView.
def enterDropView(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#dropView.
def exitDropView(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#call.
def enterCall(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#call.
def exitCall(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#grant.
def enterGrant(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#grant.
def exitGrant(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#revoke.
def enterRevoke(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#revoke.
def exitRevoke(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showGrants.
def enterShowGrants(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showGrants.
def exitShowGrants(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#explain.
def enterExplain(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#explain.
def exitExplain(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showCreateTable.
def enterShowCreateTable(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showCreateTable.
def exitShowCreateTable(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showCreateView.
def enterShowCreateView(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showCreateView.
def exitShowCreateView(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showTables.
def enterShowTables(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showTables.
def exitShowTables(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showSchemas.
def enterShowSchemas(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showSchemas.
def exitShowSchemas(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showCatalogs.
def enterShowCatalogs(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showCatalogs.
def exitShowCatalogs(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showColumns.
def enterShowColumns(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showColumns.
def exitShowColumns(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showStats.
def enterShowStats(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showStats.
def exitShowStats(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showStatsForQuery.
def enterShowStatsForQuery(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showStatsForQuery.
def exitShowStatsForQuery(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showFunctions.
def enterShowFunctions(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showFunctions.
def exitShowFunctions(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showSession.
def enterShowSession(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showSession.
def exitShowSession(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#setSession.
def enterSetSession(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#setSession.
def exitSetSession(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#resetSession.
def enterResetSession(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#resetSession.
def exitResetSession(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#startTransaction.
def enterStartTransaction(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#startTransaction.
def exitStartTransaction(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#commit.
def enterCommit(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#commit.
def exitCommit(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#rollback.
def enterRollback(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#rollback.
def exitRollback(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#showPartitions.
def enterShowPartitions(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#showPartitions.
def exitShowPartitions(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#prepare.
def enterPrepare(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#prepare.
def exitPrepare(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#deallocate.
def enterDeallocate(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#deallocate.
def exitDeallocate(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#execute.
def enterExecute(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#execute.
def exitExecute(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#describeInput.
def enterDescribeInput(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#describeInput.
def exitDescribeInput(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#describeOutput.
def enterDescribeOutput(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#describeOutput.
def exitDescribeOutput(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#query.
def enterQuery(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#query.
def exitQuery(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#with.
def enterWith(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#with.
def exitWith(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#tableElement.
def enterTableElement(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#tableElement.
def exitTableElement(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#columnDefinition.
def enterColumnDefinition(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#columnDefinition.
def exitColumnDefinition(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#likeClause.
def enterLikeClause(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#likeClause.
def exitLikeClause(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#properties.
def enterProperties(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#properties.
def exitProperties(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#property.
def enterProperty(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#property.
def exitProperty(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#queryNoWith.
def enterQueryNoWith(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#queryNoWith.
def exitQueryNoWith(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#queryTermDefault.
def enterQueryTermDefault(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#queryTermDefault.
def exitQueryTermDefault(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#setOperation.
def enterSetOperation(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#setOperation.
def exitSetOperation(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#queryPrimaryDefault.
def enterQueryPrimaryDefault(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#queryPrimaryDefault.
def exitQueryPrimaryDefault(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#table.
def enterTable(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#table.
def exitTable(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#inlineTable.
def enterInlineTable(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#inlineTable.
def exitInlineTable(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#subquery.
def enterSubquery(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#subquery.
def exitSubquery(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#sortItem.
def enterSortItem(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#sortItem.
def exitSortItem(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#querySpecification.
def enterQuerySpecification(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#querySpecification.
def exitQuerySpecification(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#groupBy.
def enterGroupBy(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#groupBy.
def exitGroupBy(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#singleGroupingSet.
def enterSingleGroupingSet(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#singleGroupingSet.
def exitSingleGroupingSet(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#rollup.
def enterRollup(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#rollup.
def exitRollup(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#cube.
def enterCube(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#cube.
def exitCube(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#multipleGroupingSets.
def enterMultipleGroupingSets(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#multipleGroupingSets.
def exitMultipleGroupingSets(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#groupingExpressions.
def enterGroupingExpressions(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#groupingExpressions.
def exitGroupingExpressions(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#groupingSet.
def enterGroupingSet(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#groupingSet.
def exitGroupingSet(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#namedQuery.
def enterNamedQuery(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#namedQuery.
def exitNamedQuery(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#setQuantifier.
def enterSetQuantifier(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#setQuantifier.
def exitSetQuantifier(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#selectSingle.
def enterSelectSingle(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#selectSingle.
def exitSelectSingle(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#selectAll.
def enterSelectAll(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#selectAll.
def exitSelectAll(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#relationDefault.
def enterRelationDefault(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#relationDefault.
def exitRelationDefault(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#joinRelation.
def enterJoinRelation(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#joinRelation.
def exitJoinRelation(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#joinType.
def enterJoinType(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#joinType.
def exitJoinType(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#joinCriteria.
def enterJoinCriteria(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#joinCriteria.
def exitJoinCriteria(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#sampledRelation.
def enterSampledRelation(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#sampledRelation.
def exitSampledRelation(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#sampleType.
def enterSampleType(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#sampleType.
def exitSampleType(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#aliasedRelation.
def enterAliasedRelation(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#aliasedRelation.
def exitAliasedRelation(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#columnAliases.
def enterColumnAliases(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#columnAliases.
def exitColumnAliases(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#tableName.
def enterTableName(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#tableName.
def exitTableName(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#subqueryRelation.
def enterSubqueryRelation(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#subqueryRelation.
def exitSubqueryRelation(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#unnest.
def enterUnnest(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#unnest.
def exitUnnest(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#lateral.
def enterLateral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#lateral.
def exitLateral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#parenthesizedRelation.
def enterParenthesizedRelation(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#parenthesizedRelation.
def exitParenthesizedRelation(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#expression.
def enterExpression(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#expression.
def exitExpression(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#logicalNot.
def enterLogicalNot(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#logicalNot.
def exitLogicalNot(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#booleanDefault.
def enterBooleanDefault(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#booleanDefault.
def exitBooleanDefault(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#logicalBinary.
def enterLogicalBinary(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#logicalBinary.
def exitLogicalBinary(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#predicated.
def enterPredicated(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#predicated.
def exitPredicated(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#comparison.
def enterComparison(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#comparison.
def exitComparison(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#quantifiedComparison.
def enterQuantifiedComparison(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#quantifiedComparison.
def exitQuantifiedComparison(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#between.
def enterBetween(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#between.
def exitBetween(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#inList.
def enterInList(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#inList.
def exitInList(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#inSubquery.
def enterInSubquery(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#inSubquery.
def exitInSubquery(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#like.
def enterLike(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#like.
def exitLike(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#nullPredicate.
def enterNullPredicate(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#nullPredicate.
def exitNullPredicate(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#distinctFrom.
def enterDistinctFrom(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#distinctFrom.
def exitDistinctFrom(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#valueExpressionDefault.
def enterValueExpressionDefault(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#valueExpressionDefault.
def exitValueExpressionDefault(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#concatenation.
def enterConcatenation(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#concatenation.
def exitConcatenation(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#arithmeticBinary.
def enterArithmeticBinary(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#arithmeticBinary.
def exitArithmeticBinary(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#arithmeticUnary.
def enterArithmeticUnary(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#arithmeticUnary.
def exitArithmeticUnary(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#atTimeZone.
def enterAtTimeZone(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#atTimeZone.
def exitAtTimeZone(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#dereference.
def enterDereference(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#dereference.
def exitDereference(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#typeConstructor.
def enterTypeConstructor(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#typeConstructor.
def exitTypeConstructor(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#specialDateTimeFunction.
def enterSpecialDateTimeFunction(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#specialDateTimeFunction.
def exitSpecialDateTimeFunction(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#substring.
def enterSubstring(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#substring.
def exitSubstring(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#cast.
def enterCast(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#cast.
def exitCast(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#lambda.
def enterLambda(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#lambda.
def exitLambda(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#parenthesizedExpression.
def enterParenthesizedExpression(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#parenthesizedExpression.
def exitParenthesizedExpression(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#parameter.
def enterParameter(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#parameter.
def exitParameter(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#normalize.
def enterNormalize(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#normalize.
def exitNormalize(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#intervalLiteral.
def enterIntervalLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#intervalLiteral.
def exitIntervalLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#numericLiteral.
def enterNumericLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#numericLiteral.
def exitNumericLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#booleanLiteral.
def enterBooleanLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#booleanLiteral.
def exitBooleanLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#simpleCase.
def enterSimpleCase(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#simpleCase.
def exitSimpleCase(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#columnReference.
def enterColumnReference(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#columnReference.
def exitColumnReference(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#nullLiteral.
def enterNullLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#nullLiteral.
def exitNullLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#rowConstructor.
def enterRowConstructor(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#rowConstructor.
def exitRowConstructor(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#subscript.
def enterSubscript(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#subscript.
def exitSubscript(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#subqueryExpression.
def enterSubqueryExpression(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#subqueryExpression.
def exitSubqueryExpression(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#binaryLiteral.
def enterBinaryLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#binaryLiteral.
def exitBinaryLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#currentUser.
def enterCurrentUser(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#currentUser.
def exitCurrentUser(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#extract.
def enterExtract(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#extract.
def exitExtract(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#stringLiteral.
def enterStringLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#stringLiteral.
def exitStringLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#arrayConstructor.
def enterArrayConstructor(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#arrayConstructor.
def exitArrayConstructor(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#functionCall.
def enterFunctionCall(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#functionCall.
def exitFunctionCall(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#exists.
def enterExists(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#exists.
def exitExists(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#position.
def enterPosition(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#position.
def exitPosition(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#searchedCase.
def enterSearchedCase(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#searchedCase.
def exitSearchedCase(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#groupingOperation.
def enterGroupingOperation(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#groupingOperation.
def exitGroupingOperation(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#basicStringLiteral.
def enterBasicStringLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#basicStringLiteral.
def exitBasicStringLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#unicodeStringLiteral.
def enterUnicodeStringLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#unicodeStringLiteral.
def exitUnicodeStringLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#timeZoneInterval.
def enterTimeZoneInterval(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#timeZoneInterval.
def exitTimeZoneInterval(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#timeZoneString.
def enterTimeZoneString(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#timeZoneString.
def exitTimeZoneString(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#comparisonOperator.
def enterComparisonOperator(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#comparisonOperator.
def exitComparisonOperator(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#comparisonQuantifier.
def enterComparisonQuantifier(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#comparisonQuantifier.
def exitComparisonQuantifier(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#booleanValue.
def enterBooleanValue(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#booleanValue.
def exitBooleanValue(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#interval.
def enterInterval(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#interval.
def exitInterval(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#intervalField.
def enterIntervalField(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#intervalField.
def exitIntervalField(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#normalForm.
def enterNormalForm(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#normalForm.
def exitNormalForm(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#type.
def enterType(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#type.
def exitType(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#typeParameter.
def enterTypeParameter(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#typeParameter.
def exitTypeParameter(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#baseType.
def enterBaseType(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#baseType.
def exitBaseType(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#whenClause.
def enterWhenClause(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#whenClause.
def exitWhenClause(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#filter.
def enterFilter(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#filter.
def exitFilter(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#over.
def enterOver(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#over.
def exitOver(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#windowFrame.
def enterWindowFrame(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#windowFrame.
def exitWindowFrame(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#unboundedFrame.
def enterUnboundedFrame(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#unboundedFrame.
def exitUnboundedFrame(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#currentRowBound.
def enterCurrentRowBound(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#currentRowBound.
def exitCurrentRowBound(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#boundedFrame.
def enterBoundedFrame(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#boundedFrame.
def exitBoundedFrame(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#explainFormat.
def enterExplainFormat(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#explainFormat.
def exitExplainFormat(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#explainType.
def enterExplainType(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#explainType.
def exitExplainType(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#isolationLevel.
def enterIsolationLevel(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#isolationLevel.
def exitIsolationLevel(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#transactionAccessMode.
def enterTransactionAccessMode(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#transactionAccessMode.
def exitTransactionAccessMode(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#readUncommitted.
def enterReadUncommitted(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#readUncommitted.
def exitReadUncommitted(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#readCommitted.
def enterReadCommitted(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#readCommitted.
def exitReadCommitted(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#repeatableRead.
def enterRepeatableRead(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#repeatableRead.
def exitRepeatableRead(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#serializable.
def enterSerializable(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#serializable.
def exitSerializable(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#positionalArgument.
def enterPositionalArgument(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#positionalArgument.
def exitPositionalArgument(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#namedArgument.
def enterNamedArgument(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#namedArgument.
def exitNamedArgument(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#privilege.
def enterPrivilege(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#privilege.
def exitPrivilege(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#qualifiedName.
def enterQualifiedName(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#qualifiedName.
def exitQualifiedName(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#unquotedIdentifier.
def enterUnquotedIdentifier(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#unquotedIdentifier.
def exitUnquotedIdentifier(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#quotedIdentifier.
def enterQuotedIdentifier(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#quotedIdentifier.
def exitQuotedIdentifier(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#backQuotedIdentifier.
def enterBackQuotedIdentifier(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#backQuotedIdentifier.
def exitBackQuotedIdentifier(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#digitIdentifier.
def enterDigitIdentifier(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#digitIdentifier.
def exitDigitIdentifier(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#decimalLiteral.
def enterDecimalLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#decimalLiteral.
def exitDecimalLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#doubleLiteral.
def enterDoubleLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#doubleLiteral.
def exitDoubleLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#integerLiteral.
def enterIntegerLiteral(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#integerLiteral.
def exitIntegerLiteral(self, ctx):
pass
# Enter a parse tree produced by SqlBaseParser#nonReserved.
def enterNonReserved(self, ctx):
pass
# Exit a parse tree produced by SqlBaseParser#nonReserved.
def exitNonReserved(self, ctx):
pass
| 25.85231
| 84
| 0.686699
| 4,448
| 39,735
| 6.134442
| 0.121403
| 0.074984
| 0.124973
| 0.224951
| 0.790808
| 0.790808
| 0.789599
| 0.789416
| 0.602543
| 0.602543
| 0
| 0.000168
| 0.253152
| 39,735
| 1,536
| 85
| 25.869141
| 0.919295
| 0.494249
| 0
| 0.498534
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.498534
| false
| 0.498534
| 0.001466
| 0
| 0.501466
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
4f7d1bb1439519c12b9c85a7e84562b68b6114c6
| 230
|
py
|
Python
|
application.py
|
ds-vologdin/cuttlefish-web-framework
|
e83f04c7563432a89a549ad8106d9439fadb0e70
|
[
"MIT"
] | null | null | null |
application.py
|
ds-vologdin/cuttlefish-web-framework
|
e83f04c7563432a89a549ad8106d9439fadb0e70
|
[
"MIT"
] | null | null | null |
application.py
|
ds-vologdin/cuttlefish-web-framework
|
e83f04c7563432a89a549ad8106d9439fadb0e70
|
[
"MIT"
] | null | null | null |
from cuttlefish.cuttlefish_application import cuttlefish_application
import urls
def application(env, start_response):
urls_handlers = urls.urls_handlers
return cuttlefish_application(env, start_response, urls_handlers)
| 28.75
| 69
| 0.83913
| 27
| 230
| 6.851852
| 0.407407
| 0.340541
| 0.291892
| 0.291892
| 0.421622
| 0.421622
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113043
| 230
| 7
| 70
| 32.857143
| 0.906863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4f8c58e5bd777fb546770b877c30ac80632272cf
| 301
|
py
|
Python
|
web3/constants.py
|
ernestosperanza/web3.py
|
3e9e4b30812249da376dc752998d4e71b4f5836d
|
[
"MIT"
] | null | null | null |
web3/constants.py
|
ernestosperanza/web3.py
|
3e9e4b30812249da376dc752998d4e71b4f5836d
|
[
"MIT"
] | null | null | null |
web3/constants.py
|
ernestosperanza/web3.py
|
3e9e4b30812249da376dc752998d4e71b4f5836d
|
[
"MIT"
] | null | null | null |
# Constants as Strings
ADDRESS_ZERO = "0x0000000000000000000000000000000000000000"
MAX_INT = "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
HASH_ZERO = "0x0000000000000000000000000000000000000000000000000000000000000000"
# Constants as Int
WEI_PER_ETHER = 1000000000000000000
| 30.1
| 80
| 0.883721
| 19
| 301
| 13.736842
| 0.789474
| 0.084291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.453237
| 0.076412
| 301
| 9
| 81
| 33.444444
| 0.485612
| 0.122924
| 0
| 0
| 0
| 0
| 0.666667
| 0.666667
| 0
| 0
| 0.666667
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4f905195ddbeaa2b264a991a6ee66020ece95733
| 6,479
|
py
|
Python
|
pyroomacoustics/windows.py
|
entn-at/pyroomacoustics
|
6572f6d0cde1a4de8d27caa43a7a67fc0ba91c9a
|
[
"MIT"
] | 1
|
2019-12-28T07:14:52.000Z
|
2019-12-28T07:14:52.000Z
|
pyroomacoustics/windows.py
|
entn-at/pyroomacoustics
|
6572f6d0cde1a4de8d27caa43a7a67fc0ba91c9a
|
[
"MIT"
] | null | null | null |
pyroomacoustics/windows.py
|
entn-at/pyroomacoustics
|
6572f6d0cde1a4de8d27caa43a7a67fc0ba91c9a
|
[
"MIT"
] | 1
|
2019-09-11T06:11:11.000Z
|
2019-09-11T06:11:11.000Z
|
# @version: 1.0 date: 05/06/2015 by Sidney Barthe
# @author: robin.scheibler@epfl.ch, ivan.dokmanic@epfl.ch, sidney.barthe@epfl.ch
# @copyright: EPFL-IC-LCAV 2015
'''A collection of windowing functions.'''
import numpy as np
# cosine window function
def cosine(N, flag='asymmetric', length='full'):
'''
The cosine window function
.. math::
w[n] = \cos(\pi (n/M - 0.5))^2
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are used for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
- *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
:math:`w[n]^2 + w[n+N/2]^2=1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# first choose the indexes of points to compute
if (length == 'left'): # left side of window
t = np.arange(0, N / 2)
elif(length == 'right'): # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if (flag == 'symmetric' or flag == 'mdct'):
t = t / float(N - 1)
else:
t = t / float(N)
w = np.cos(np.pi * (t - 0.5)) ** 2
# make the window respect MDCT condition
if (flag == 'mdct'):
w **= 2
d = w[:N / 2] + w[N / 2:]
w[:N / 2] *= 1. / d
w[N / 2:] *= 1. / d
# compute window
return w
# triangular window function
def triang(N, flag='asymmetric', length='full'):
'''
The triangular window function
.. math::
w[n] = 1 - | 2 n / M - 1 |, n=0,\ldots,N-1
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are used for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
- *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
:math:`w[n]^2 + w[n+N/2]^2=1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# first choose the indexes of points to compute
if (length == 'left'): # left side of window
t = np.arange(0, N / 2)
elif(length == 'right'): # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if (flag == 'symmetric' or flag == 'mdct'):
t = t / float(N - 1)
else:
t = t / float(N)
w = 1. - np.abs(2. * t - 1.)
# make the window respect MDCT condition
if (flag == 'mdct'):
d = w[:N / 2] + w[N / 2:]
w[:N / 2] *= 1. / d
w[N / 2:] *= 1. / d
# compute window
return w
# hann window function
def hann(N, flag='asymmetric', length='full'):
'''
The Hann window function
.. math::
w[n] = 0.5 (1 - \cos(2 \pi n / M)), n=0,\ldots,N-1
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are used for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
- *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
:math:`w[n]^2 + w[n+N/2]^2=1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# first choose the indexes of points to compute
if (length == 'left'): # left side of window
t = np.arange(0, N / 2)
elif(length == 'right'): # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if (flag == 'symmetric' or flag == 'mdct'):
t = t / float(N - 1)
else:
t = t / float(N)
w = 0.5 * (1 - np.cos(2 * np.pi * t))
# make the window respect MDCT condition
if (flag == 'mdct'):
d = w[:N / 2] + w[N / 2:]
w[:N / 2] *= 1. / d
w[N / 2:] *= 1. / d
# compute window
return w
# Blackman-Harris window
def blackman_harris(N, flag='asymmetric', length='full'):
'''
The Hann window function
.. math::
w[n] = a_0 - a_1 \cos(2\pi n/M) + a_2 \cos(4\pi n/M) + a_3 \cos(6\pi n/M), n=0,\ldots,N-1
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are used for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# coefficients
a = np.array([.35875, .48829, .14128, .01168])
# first choose the indexes of points to compute
if (length == 'left'): # left side of window
t = np.arange(0, N / 2)
elif(length == 'right'): # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if (flag == 'symmetric'):
t = t / float(N - 1)
else:
t = t / float(N)
pi = np.pi
w = a[0] - a[1]*np.cos(2*pi*t) + a[2]*np.cos(4*pi*t) + a[3]*np.cos(6*pi*t)
return w
# Rectangular window function
def rect(N):
'''
The rectangular window
.. math::
w[n] = 1, n=0,\ldots,N-1
Parameters
----------
N: int
the window length
'''
return np.ones(N)
| 26.662551
| 97
| 0.53249
| 942
| 6,479
| 3.657113
| 0.115711
| 0.015094
| 0.013062
| 0.01045
| 0.843251
| 0.83164
| 0.811321
| 0.811321
| 0.811321
| 0.798839
| 0
| 0.031685
| 0.322889
| 6,479
| 242
| 98
| 26.772727
| 0.75359
| 0.588671
| 0
| 0.785714
| 0
| 0
| 0.067797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.014286
| 0
| 0.157143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
96d597cade894543af30d08fa7a60fcf41e62cb6
| 849,553
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_tunnel_l2tun_oper.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 177
|
2016-03-15T17:03:51.000Z
|
2022-03-18T16:48:44.000Z
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_tunnel_l2tun_oper.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 18
|
2016-03-30T10:45:22.000Z
|
2020-07-14T16:28:13.000Z
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_tunnel_l2tun_oper.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 85
|
2016-03-16T20:38:57.000Z
|
2022-02-22T04:26:02.000Z
|
""" Cisco_IOS_XR_tunnel_l2tun_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR tunnel\-l2tun package operational data.
This module contains definitions
for the following management objects\:
l2tp\: L2TP operational data
l2tpv2\: l2tpv2
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class DigestHash(Enum):
"""
DigestHash (Enum Class)
Digest hash types
.. data:: md5 = 0
MD5
.. data:: sha1 = 1
SHA1
"""
md5 = Enum.YLeaf(0, "md5")
sha1 = Enum.YLeaf(1, "sha1")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['DigestHash']
class L2tp(_Entity_):
"""
L2TP operational data
.. attribute:: nodes
List of nodes for which subscriber data is collected
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp, self).__init__()
self._top_entity = None
self.yang_name = "l2tp"
self.yang_parent_name = "Cisco-IOS-XR-tunnel-l2tun-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("nodes", ("nodes", L2tp.Nodes))])
self._leafs = OrderedDict()
self.nodes = L2tp.Nodes()
self.nodes.parent = self
self._children_name_map["nodes"] = "nodes"
self._segment_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tp"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp, [], name, value)
class Nodes(_Entity_):
"""
List of nodes for which subscriber data is
collected
.. attribute:: node
Subscriber data for a particular node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes, self).__init__()
self.yang_name = "nodes"
self.yang_parent_name = "l2tp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("node", ("node", L2tp.Nodes.Node))])
self._leafs = OrderedDict()
self.node = YList(self)
self._segment_path = lambda: "nodes"
self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes, [], name, value)
class Node(_Entity_):
"""
Subscriber data for a particular node
.. attribute:: node_name (key)
Node name
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
**config**\: False
.. attribute:: counters
L2TP control messages counters
**type**\: :py:class:`Counters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters>`
**config**\: False
.. attribute:: tunnel_configurations
List of tunnel IDs
**type**\: :py:class:`TunnelConfigurations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.TunnelConfigurations>`
**config**\: False
.. attribute:: counter_hist_fail
Failure events leading to disconnection
**type**\: :py:class:`CounterHistFail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.CounterHistFail>`
**config**\: False
.. attribute:: classes
List of L2TP class names
**type**\: :py:class:`Classes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Classes>`
**config**\: False
.. attribute:: tunnels
List of tunnel IDs
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Tunnels>`
**config**\: False
.. attribute:: sessions
List of session IDs
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Sessions>`
**config**\: False
.. attribute:: session
L2TP control messages counters
**type**\: :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Session>`
**config**\: False
.. attribute:: internal
L2TP v2/v3 internal information
**type**\: :py:class:`Internal <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Internal>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node, self).__init__()
self.yang_name = "node"
self.yang_parent_name = "nodes"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['node_name']
self._child_classes = OrderedDict([("counters", ("counters", L2tp.Nodes.Node.Counters)), ("tunnel-configurations", ("tunnel_configurations", L2tp.Nodes.Node.TunnelConfigurations)), ("counter-hist-fail", ("counter_hist_fail", L2tp.Nodes.Node.CounterHistFail)), ("classes", ("classes", L2tp.Nodes.Node.Classes)), ("tunnels", ("tunnels", L2tp.Nodes.Node.Tunnels)), ("sessions", ("sessions", L2tp.Nodes.Node.Sessions)), ("session", ("session", L2tp.Nodes.Node.Session)), ("internal", ("internal", L2tp.Nodes.Node.Internal))])
self._leafs = OrderedDict([
('node_name', (YLeaf(YType.str, 'node-name'), ['str'])),
])
self.node_name = None
self.counters = L2tp.Nodes.Node.Counters()
self.counters.parent = self
self._children_name_map["counters"] = "counters"
self.tunnel_configurations = L2tp.Nodes.Node.TunnelConfigurations()
self.tunnel_configurations.parent = self
self._children_name_map["tunnel_configurations"] = "tunnel-configurations"
self.counter_hist_fail = L2tp.Nodes.Node.CounterHistFail()
self.counter_hist_fail.parent = self
self._children_name_map["counter_hist_fail"] = "counter-hist-fail"
self.classes = L2tp.Nodes.Node.Classes()
self.classes.parent = self
self._children_name_map["classes"] = "classes"
self.tunnels = L2tp.Nodes.Node.Tunnels()
self.tunnels.parent = self
self._children_name_map["tunnels"] = "tunnels"
self.sessions = L2tp.Nodes.Node.Sessions()
self.sessions.parent = self
self._children_name_map["sessions"] = "sessions"
self.session = L2tp.Nodes.Node.Session()
self.session.parent = self
self._children_name_map["session"] = "session"
self.internal = L2tp.Nodes.Node.Internal()
self.internal.parent = self
self._children_name_map["internal"] = "internal"
self._segment_path = lambda: "node" + "[node-name='" + str(self.node_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/nodes/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node, ['node_name'], name, value)
class Counters(_Entity_):
"""
L2TP control messages counters
.. attribute:: control
L2TP control messages counters
**type**\: :py:class:`Control <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters, self).__init__()
self.yang_name = "counters"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("control", ("control", L2tp.Nodes.Node.Counters.Control))])
self._leafs = OrderedDict()
self.control = L2tp.Nodes.Node.Counters.Control()
self.control.parent = self
self._children_name_map["control"] = "control"
self._segment_path = lambda: "counters"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters, [], name, value)
class Control(_Entity_):
"""
L2TP control messages counters
.. attribute:: tunnel_xr
L2TP control tunnel messages counters
**type**\: :py:class:`TunnelXr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr>`
**config**\: False
.. attribute:: tunnels
Table of tunnel IDs of control message counters
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.Tunnels>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control, self).__init__()
self.yang_name = "control"
self.yang_parent_name = "counters"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("tunnel-xr", ("tunnel_xr", L2tp.Nodes.Node.Counters.Control.TunnelXr)), ("tunnels", ("tunnels", L2tp.Nodes.Node.Counters.Control.Tunnels))])
self._leafs = OrderedDict()
self.tunnel_xr = L2tp.Nodes.Node.Counters.Control.TunnelXr()
self.tunnel_xr.parent = self
self._children_name_map["tunnel_xr"] = "tunnel-xr"
self.tunnels = L2tp.Nodes.Node.Counters.Control.Tunnels()
self.tunnels.parent = self
self._children_name_map["tunnels"] = "tunnels"
self._segment_path = lambda: "control"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control, [], name, value)
class TunnelXr(_Entity_):
"""
L2TP control tunnel messages counters
.. attribute:: authentication
Tunnel authentication counters
**type**\: :py:class:`Authentication <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication>`
**config**\: False
.. attribute:: global_
Tunnel counters
**type**\: :py:class:`Global <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Global>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr, self).__init__()
self.yang_name = "tunnel-xr"
self.yang_parent_name = "control"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("authentication", ("authentication", L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication)), ("global", ("global_", L2tp.Nodes.Node.Counters.Control.TunnelXr.Global))])
self._leafs = OrderedDict()
self.authentication = L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication()
self.authentication.parent = self
self._children_name_map["authentication"] = "authentication"
self.global_ = L2tp.Nodes.Node.Counters.Control.TunnelXr.Global()
self.global_.parent = self
self._children_name_map["global_"] = "global"
self._segment_path = lambda: "tunnel-xr"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr, [], name, value)
class Authentication(_Entity_):
"""
Tunnel authentication counters
.. attribute:: nonce_avp
Nonce AVP statistics
**type**\: :py:class:`NonceAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp>`
**config**\: False
.. attribute:: common_digest
Common digest statistics
**type**\: :py:class:`CommonDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest>`
**config**\: False
.. attribute:: primary_digest
Primary digest statistics
**type**\: :py:class:`PrimaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest>`
**config**\: False
.. attribute:: secondary_digest
Secondary digest statistics
**type**\: :py:class:`SecondaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest>`
**config**\: False
.. attribute:: integrity_check
Integrity check statistics
**type**\: :py:class:`IntegrityCheck <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck>`
**config**\: False
.. attribute:: local_secret
Local secret statistics
**type**\: :py:class:`LocalSecret <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret>`
**config**\: False
.. attribute:: challenge_avp
Challenge AVP statistics
**type**\: :py:class:`ChallengeAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp>`
**config**\: False
.. attribute:: challenge_reponse
Challenge response statistics
**type**\: :py:class:`ChallengeReponse <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse>`
**config**\: False
.. attribute:: overall_statistics
Overall statistics
**type**\: :py:class:`OverallStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication, self).__init__()
self.yang_name = "authentication"
self.yang_parent_name = "tunnel-xr"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("nonce-avp", ("nonce_avp", L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp)), ("common-digest", ("common_digest", L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest)), ("primary-digest", ("primary_digest", L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest)), ("secondary-digest", ("secondary_digest", L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest)), ("integrity-check", ("integrity_check", L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck)), ("local-secret", ("local_secret", L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret)), ("challenge-avp", ("challenge_avp", L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp)), ("challenge-reponse", ("challenge_reponse", L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse)), ("overall-statistics", ("overall_statistics", L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics))])
self._leafs = OrderedDict()
self.nonce_avp = L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp()
self.nonce_avp.parent = self
self._children_name_map["nonce_avp"] = "nonce-avp"
self.common_digest = L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest()
self.common_digest.parent = self
self._children_name_map["common_digest"] = "common-digest"
self.primary_digest = L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest()
self.primary_digest.parent = self
self._children_name_map["primary_digest"] = "primary-digest"
self.secondary_digest = L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest()
self.secondary_digest.parent = self
self._children_name_map["secondary_digest"] = "secondary-digest"
self.integrity_check = L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck()
self.integrity_check.parent = self
self._children_name_map["integrity_check"] = "integrity-check"
self.local_secret = L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret()
self.local_secret.parent = self
self._children_name_map["local_secret"] = "local-secret"
self.challenge_avp = L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp()
self.challenge_avp.parent = self
self._children_name_map["challenge_avp"] = "challenge-avp"
self.challenge_reponse = L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse()
self.challenge_reponse.parent = self
self._children_name_map["challenge_reponse"] = "challenge-reponse"
self.overall_statistics = L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics()
self.overall_statistics.parent = self
self._children_name_map["overall_statistics"] = "overall-statistics"
self._segment_path = lambda: "authentication"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication, [], name, value)
class NonceAvp(_Entity_):
"""
Nonce AVP statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp, self).__init__()
self.yang_name = "nonce-avp"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "nonce-avp"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp']['meta_info']
class CommonDigest(_Entity_):
"""
Common digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest, self).__init__()
self.yang_name = "common-digest"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "common-digest"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest']['meta_info']
class PrimaryDigest(_Entity_):
"""
Primary digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest, self).__init__()
self.yang_name = "primary-digest"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "primary-digest"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest']['meta_info']
class SecondaryDigest(_Entity_):
"""
Secondary digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest, self).__init__()
self.yang_name = "secondary-digest"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "secondary-digest"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest']['meta_info']
class IntegrityCheck(_Entity_):
"""
Integrity check statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck, self).__init__()
self.yang_name = "integrity-check"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "integrity-check"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck']['meta_info']
class LocalSecret(_Entity_):
"""
Local secret statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret, self).__init__()
self.yang_name = "local-secret"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "local-secret"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret']['meta_info']
class ChallengeAvp(_Entity_):
"""
Challenge AVP statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp, self).__init__()
self.yang_name = "challenge-avp"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "challenge-avp"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp']['meta_info']
class ChallengeReponse(_Entity_):
"""
Challenge response statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse, self).__init__()
self.yang_name = "challenge-reponse"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "challenge-reponse"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse']['meta_info']
class OverallStatistics(_Entity_):
"""
Overall statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics, self).__init__()
self.yang_name = "overall-statistics"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "overall-statistics"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Authentication']['meta_info']
class Global(_Entity_):
"""
Tunnel counters
.. attribute:: transmit
Transmit data
**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit>`
**config**\: False
.. attribute:: retransmit
Re transmit data
**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit>`
**config**\: False
.. attribute:: received
Received data
**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Received>`
**config**\: False
.. attribute:: drop
Drop data
**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Drop>`
**config**\: False
.. attribute:: total_transmit
Total transmit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_retransmit
Total retransmit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_received
Total received
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_drop
Total drop
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Global, self).__init__()
self.yang_name = "global"
self.yang_parent_name = "tunnel-xr"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("transmit", ("transmit", L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit)), ("retransmit", ("retransmit", L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit)), ("received", ("received", L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Received)), ("drop", ("drop", L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Drop))])
self._leafs = OrderedDict([
('total_transmit', (YLeaf(YType.uint32, 'total-transmit'), ['int'])),
('total_retransmit', (YLeaf(YType.uint32, 'total-retransmit'), ['int'])),
('total_received', (YLeaf(YType.uint32, 'total-received'), ['int'])),
('total_drop', (YLeaf(YType.uint32, 'total-drop'), ['int'])),
])
self.total_transmit = None
self.total_retransmit = None
self.total_received = None
self.total_drop = None
self.transmit = L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit()
self.transmit.parent = self
self._children_name_map["transmit"] = "transmit"
self.retransmit = L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit()
self.retransmit.parent = self
self._children_name_map["retransmit"] = "retransmit"
self.received = L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Received()
self.received.parent = self
self._children_name_map["received"] = "received"
self.drop = L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Drop()
self.drop.parent = self
self._children_name_map["drop"] = "drop"
self._segment_path = lambda: "global"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Global, ['total_transmit', 'total_retransmit', 'total_received', 'total_drop'], name, value)
class Transmit(_Entity_):
"""
Transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit, self).__init__()
self.yang_name = "transmit"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "transmit"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit']['meta_info']
class Retransmit(_Entity_):
"""
Re transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit, self).__init__()
self.yang_name = "retransmit"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "retransmit"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit']['meta_info']
class Received(_Entity_):
"""
Received data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Received, self).__init__()
self.yang_name = "received"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "received"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Received, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Received']['meta_info']
class Drop(_Entity_):
"""
Drop data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Drop, self).__init__()
self.yang_name = "drop"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "drop"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Drop, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Global.Drop']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr.Global']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.TunnelXr']['meta_info']
class Tunnels(_Entity_):
"""
Table of tunnel IDs of control message counters
.. attribute:: tunnel
L2TP tunnel control message counters
**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.Tunnels, self).__init__()
self.yang_name = "tunnels"
self.yang_parent_name = "control"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("tunnel", ("tunnel", L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel))])
self._leafs = OrderedDict()
self.tunnel = YList(self)
self._segment_path = lambda: "tunnels"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.Tunnels, [], name, value)
class Tunnel(_Entity_):
"""
L2TP tunnel control message counters
.. attribute:: tunnel_id (key)
L2TP tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: brief
L2TP control message local and remote addresses
**type**\: :py:class:`Brief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief>`
**config**\: False
.. attribute:: global_
Global data
**type**\: :py:class:`Global <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel, self).__init__()
self.yang_name = "tunnel"
self.yang_parent_name = "tunnels"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['tunnel_id']
self._child_classes = OrderedDict([("brief", ("brief", L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief)), ("global", ("global_", L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global))])
self._leafs = OrderedDict([
('tunnel_id', (YLeaf(YType.uint32, 'tunnel-id'), ['int'])),
])
self.tunnel_id = None
self.brief = L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief()
self.brief.parent = self
self._children_name_map["brief"] = "brief"
self.global_ = L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global()
self.global_.parent = self
self._children_name_map["global_"] = "global"
self._segment_path = lambda: "tunnel" + "[tunnel-id='" + str(self.tunnel_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel, ['tunnel_id'], name, value)
class Brief(_Entity_):
"""
L2TP control message local and remote addresses
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: local_address
Local IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: remote_address
Remote IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief, self).__init__()
self.yang_name = "brief"
self.yang_parent_name = "tunnel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('remote_tunnel_id', (YLeaf(YType.uint32, 'remote-tunnel-id'), ['int'])),
('local_address', (YLeaf(YType.str, 'local-address'), ['str'])),
('remote_address', (YLeaf(YType.str, 'remote-address'), ['str'])),
])
self.remote_tunnel_id = None
self.local_address = None
self.remote_address = None
self._segment_path = lambda: "brief"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief, ['remote_tunnel_id', 'local_address', 'remote_address'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief']['meta_info']
class Global(_Entity_):
"""
Global data
.. attribute:: transmit
Transmit data
**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit>`
**config**\: False
.. attribute:: retransmit
Re transmit data
**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit>`
**config**\: False
.. attribute:: received
Received data
**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received>`
**config**\: False
.. attribute:: drop
Drop data
**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop>`
**config**\: False
.. attribute:: total_transmit
Total transmit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_retransmit
Total retransmit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_received
Total received
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_drop
Total drop
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global, self).__init__()
self.yang_name = "global"
self.yang_parent_name = "tunnel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("transmit", ("transmit", L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit)), ("retransmit", ("retransmit", L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit)), ("received", ("received", L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received)), ("drop", ("drop", L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop))])
self._leafs = OrderedDict([
('total_transmit', (YLeaf(YType.uint32, 'total-transmit'), ['int'])),
('total_retransmit', (YLeaf(YType.uint32, 'total-retransmit'), ['int'])),
('total_received', (YLeaf(YType.uint32, 'total-received'), ['int'])),
('total_drop', (YLeaf(YType.uint32, 'total-drop'), ['int'])),
])
self.total_transmit = None
self.total_retransmit = None
self.total_received = None
self.total_drop = None
self.transmit = L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit()
self.transmit.parent = self
self._children_name_map["transmit"] = "transmit"
self.retransmit = L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit()
self.retransmit.parent = self
self._children_name_map["retransmit"] = "retransmit"
self.received = L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received()
self.received.parent = self
self._children_name_map["received"] = "received"
self.drop = L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop()
self.drop.parent = self
self._children_name_map["drop"] = "drop"
self._segment_path = lambda: "global"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global, ['total_transmit', 'total_retransmit', 'total_received', 'total_drop'], name, value)
class Transmit(_Entity_):
"""
Transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit, self).__init__()
self.yang_name = "transmit"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "transmit"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit']['meta_info']
class Retransmit(_Entity_):
"""
Re transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit, self).__init__()
self.yang_name = "retransmit"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "retransmit"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit']['meta_info']
class Received(_Entity_):
"""
Received data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received, self).__init__()
self.yang_name = "received"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "received"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received']['meta_info']
class Drop(_Entity_):
"""
Drop data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop, self).__init__()
self.yang_name = "drop"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "drop"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.Tunnels.Tunnel']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control.Tunnels']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters.Control']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Counters']['meta_info']
class TunnelConfigurations(_Entity_):
"""
List of tunnel IDs
.. attribute:: tunnel_configuration
L2TP tunnel information
**type**\: list of :py:class:`TunnelConfiguration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.TunnelConfigurations, self).__init__()
self.yang_name = "tunnel-configurations"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("tunnel-configuration", ("tunnel_configuration", L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration))])
self._leafs = OrderedDict()
self.tunnel_configuration = YList(self)
self._segment_path = lambda: "tunnel-configurations"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.TunnelConfigurations, [], name, value)
class TunnelConfiguration(_Entity_):
"""
L2TP tunnel information
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_class
L2Tp class data
**type**\: :py:class:`L2tpClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass>`
**config**\: False
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration, self).__init__()
self.yang_name = "tunnel-configuration"
self.yang_parent_name = "tunnel-configurations"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['local_tunnel_id']
self._child_classes = OrderedDict([("l2tp-class", ("l2tp_class", L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass))])
self._leafs = OrderedDict([
('local_tunnel_id', (YLeaf(YType.uint32, 'local-tunnel-id'), ['int'])),
('remote_tunnel_id', (YLeaf(YType.uint32, 'remote-tunnel-id'), ['int'])),
])
self.local_tunnel_id = None
self.remote_tunnel_id = None
self.l2tp_class = L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass()
self.l2tp_class.parent = self
self._children_name_map["l2tp_class"] = "l2tp-class"
self._segment_path = lambda: "tunnel-configuration" + "[local-tunnel-id='" + str(self.local_tunnel_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration, ['local_tunnel_id', 'remote_tunnel_id'], name, value)
class L2tpClass(_Entity_):
"""
L2Tp class data
.. attribute:: ip_tos
IP TOS
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: vrf_name
VRF name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: receive_window_size
Receive window size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: class_name_xr
Class name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: digest_hash
Hash configured as MD5 or SHA1
**type**\: :py:class:`DigestHash <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.DigestHash>`
**config**\: False
.. attribute:: password
Password
**type**\: str
**length:** 0..25
**config**\: False
.. attribute:: encoded_password
Encoded password
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: host_name
Host name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: accounting_method_list
Accounting List
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: hello_timeout
Hello timeout value in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: setup_timeout
Timeout setup value in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: retransmit_minimum_timeout
Retransmit minimum timeout in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: retransmit_maximum_timeout
Retransmit maximum timeout in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: initial_retransmit_minimum_timeout
Initial timeout minimum in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: initial_retransmit_maximum_timeout
Initial timeout maximum in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: timeout_no_user
Timeout no user
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: retransmit_retries
Retransmit retries
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: initial_retransmit_retries
Initial retransmit retries
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: is_authentication_enabled
True if authentication is enabled
**type**\: bool
**config**\: False
.. attribute:: is_hidden
True if class is hidden
**type**\: bool
**config**\: False
.. attribute:: is_digest_enabled
True if digest authentication is enabled
**type**\: bool
**config**\: False
.. attribute:: is_digest_check_enabled
True if digest check is enabled
**type**\: bool
**config**\: False
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled
**type**\: bool
**config**\: False
.. attribute:: is_peer_address_checked
True if peer address is checked
**type**\: bool
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass, self).__init__()
self.yang_name = "l2tp-class"
self.yang_parent_name = "tunnel-configuration"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ip_tos', (YLeaf(YType.uint8, 'ip-tos'), ['int'])),
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
('receive_window_size', (YLeaf(YType.uint16, 'receive-window-size'), ['int'])),
('class_name_xr', (YLeaf(YType.str, 'class-name-xr'), ['str'])),
('digest_hash', (YLeaf(YType.enumeration, 'digest-hash'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper', 'DigestHash', '')])),
('password', (YLeaf(YType.str, 'password'), ['str'])),
('encoded_password', (YLeaf(YType.str, 'encoded-password'), ['str'])),
('host_name', (YLeaf(YType.str, 'host-name'), ['str'])),
('accounting_method_list', (YLeaf(YType.str, 'accounting-method-list'), ['str'])),
('hello_timeout', (YLeaf(YType.uint32, 'hello-timeout'), ['int'])),
('setup_timeout', (YLeaf(YType.uint32, 'setup-timeout'), ['int'])),
('retransmit_minimum_timeout', (YLeaf(YType.uint32, 'retransmit-minimum-timeout'), ['int'])),
('retransmit_maximum_timeout', (YLeaf(YType.uint32, 'retransmit-maximum-timeout'), ['int'])),
('initial_retransmit_minimum_timeout', (YLeaf(YType.uint32, 'initial-retransmit-minimum-timeout'), ['int'])),
('initial_retransmit_maximum_timeout', (YLeaf(YType.uint32, 'initial-retransmit-maximum-timeout'), ['int'])),
('timeout_no_user', (YLeaf(YType.uint32, 'timeout-no-user'), ['int'])),
('retransmit_retries', (YLeaf(YType.uint32, 'retransmit-retries'), ['int'])),
('initial_retransmit_retries', (YLeaf(YType.uint32, 'initial-retransmit-retries'), ['int'])),
('is_authentication_enabled', (YLeaf(YType.boolean, 'is-authentication-enabled'), ['bool'])),
('is_hidden', (YLeaf(YType.boolean, 'is-hidden'), ['bool'])),
('is_digest_enabled', (YLeaf(YType.boolean, 'is-digest-enabled'), ['bool'])),
('is_digest_check_enabled', (YLeaf(YType.boolean, 'is-digest-check-enabled'), ['bool'])),
('is_congestion_control_enabled', (YLeaf(YType.boolean, 'is-congestion-control-enabled'), ['bool'])),
('is_peer_address_checked', (YLeaf(YType.boolean, 'is-peer-address-checked'), ['bool'])),
])
self.ip_tos = None
self.vrf_name = None
self.receive_window_size = None
self.class_name_xr = None
self.digest_hash = None
self.password = None
self.encoded_password = None
self.host_name = None
self.accounting_method_list = None
self.hello_timeout = None
self.setup_timeout = None
self.retransmit_minimum_timeout = None
self.retransmit_maximum_timeout = None
self.initial_retransmit_minimum_timeout = None
self.initial_retransmit_maximum_timeout = None
self.timeout_no_user = None
self.retransmit_retries = None
self.initial_retransmit_retries = None
self.is_authentication_enabled = None
self.is_hidden = None
self.is_digest_enabled = None
self.is_digest_check_enabled = None
self.is_congestion_control_enabled = None
self.is_peer_address_checked = None
self._segment_path = lambda: "l2tp-class"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass, ['ip_tos', 'vrf_name', 'receive_window_size', 'class_name_xr', 'digest_hash', 'password', 'encoded_password', 'host_name', 'accounting_method_list', 'hello_timeout', 'setup_timeout', 'retransmit_minimum_timeout', 'retransmit_maximum_timeout', 'initial_retransmit_minimum_timeout', 'initial_retransmit_maximum_timeout', 'timeout_no_user', 'retransmit_retries', 'initial_retransmit_retries', 'is_authentication_enabled', 'is_hidden', 'is_digest_enabled', 'is_digest_check_enabled', 'is_congestion_control_enabled', 'is_peer_address_checked'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.TunnelConfigurations.TunnelConfiguration']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.TunnelConfigurations']['meta_info']
class CounterHistFail(_Entity_):
"""
Failure events leading to disconnection
.. attribute:: sess_down_tmout
sesions affected due to timeout
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: tx_counters
Send side counters
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
**config**\: False
.. attribute:: rx_counters
Receive side counters
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
**config**\: False
.. attribute:: pkt_timeout
timeout events by packet
**type**\: list of :py:class:`PktTimeout <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.CounterHistFail.PktTimeout>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.CounterHistFail, self).__init__()
self.yang_name = "counter-hist-fail"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("pkt-timeout", ("pkt_timeout", L2tp.Nodes.Node.CounterHistFail.PktTimeout))])
self._leafs = OrderedDict([
('sess_down_tmout', (YLeaf(YType.uint32, 'sess-down-tmout'), ['int'])),
('tx_counters', (YLeaf(YType.str, 'tx-counters'), ['str'])),
('rx_counters', (YLeaf(YType.str, 'rx-counters'), ['str'])),
])
self.sess_down_tmout = None
self.tx_counters = None
self.rx_counters = None
self.pkt_timeout = YList(self)
self._segment_path = lambda: "counter-hist-fail"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.CounterHistFail, ['sess_down_tmout', 'tx_counters', 'rx_counters'], name, value)
class PktTimeout(_Entity_):
"""
timeout events by packet
.. attribute:: entry
timeout events by packet
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.CounterHistFail.PktTimeout, self).__init__()
self.yang_name = "pkt-timeout"
self.yang_parent_name = "counter-hist-fail"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('entry', (YLeaf(YType.uint32, 'entry'), ['int'])),
])
self.entry = None
self._segment_path = lambda: "pkt-timeout"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.CounterHistFail.PktTimeout, ['entry'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.CounterHistFail.PktTimeout']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.CounterHistFail']['meta_info']
class Classes(_Entity_):
"""
List of L2TP class names
.. attribute:: class_
L2TP class name
**type**\: list of :py:class:`Class <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Classes.Class>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Classes, self).__init__()
self.yang_name = "classes"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("class", ("class_", L2tp.Nodes.Node.Classes.Class))])
self._leafs = OrderedDict()
self.class_ = YList(self)
self._segment_path = lambda: "classes"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Classes, [], name, value)
class Class(_Entity_):
"""
L2TP class name
.. attribute:: class_name (key)
L2TP class name
**type**\: str
**length:** 1..31
**config**\: False
.. attribute:: ip_tos
IP TOS
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: vrf_name
VRF name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: receive_window_size
Receive window size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: class_name_xr
Class name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: digest_hash
Hash configured as MD5 or SHA1
**type**\: :py:class:`DigestHash <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.DigestHash>`
**config**\: False
.. attribute:: password
Password
**type**\: str
**length:** 0..25
**config**\: False
.. attribute:: encoded_password
Encoded password
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: host_name
Host name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: accounting_method_list
Accounting List
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: hello_timeout
Hello timeout value in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: setup_timeout
Timeout setup value in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: retransmit_minimum_timeout
Retransmit minimum timeout in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: retransmit_maximum_timeout
Retransmit maximum timeout in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: initial_retransmit_minimum_timeout
Initial timeout minimum in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: initial_retransmit_maximum_timeout
Initial timeout maximum in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: timeout_no_user
Timeout no user
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: retransmit_retries
Retransmit retries
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: initial_retransmit_retries
Initial retransmit retries
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: is_authentication_enabled
True if authentication is enabled
**type**\: bool
**config**\: False
.. attribute:: is_hidden
True if class is hidden
**type**\: bool
**config**\: False
.. attribute:: is_digest_enabled
True if digest authentication is enabled
**type**\: bool
**config**\: False
.. attribute:: is_digest_check_enabled
True if digest check is enabled
**type**\: bool
**config**\: False
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled
**type**\: bool
**config**\: False
.. attribute:: is_peer_address_checked
True if peer address is checked
**type**\: bool
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Classes.Class, self).__init__()
self.yang_name = "class"
self.yang_parent_name = "classes"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['class_name']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('class_name', (YLeaf(YType.str, 'class-name'), ['str'])),
('ip_tos', (YLeaf(YType.uint8, 'ip-tos'), ['int'])),
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
('receive_window_size', (YLeaf(YType.uint16, 'receive-window-size'), ['int'])),
('class_name_xr', (YLeaf(YType.str, 'class-name-xr'), ['str'])),
('digest_hash', (YLeaf(YType.enumeration, 'digest-hash'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper', 'DigestHash', '')])),
('password', (YLeaf(YType.str, 'password'), ['str'])),
('encoded_password', (YLeaf(YType.str, 'encoded-password'), ['str'])),
('host_name', (YLeaf(YType.str, 'host-name'), ['str'])),
('accounting_method_list', (YLeaf(YType.str, 'accounting-method-list'), ['str'])),
('hello_timeout', (YLeaf(YType.uint32, 'hello-timeout'), ['int'])),
('setup_timeout', (YLeaf(YType.uint32, 'setup-timeout'), ['int'])),
('retransmit_minimum_timeout', (YLeaf(YType.uint32, 'retransmit-minimum-timeout'), ['int'])),
('retransmit_maximum_timeout', (YLeaf(YType.uint32, 'retransmit-maximum-timeout'), ['int'])),
('initial_retransmit_minimum_timeout', (YLeaf(YType.uint32, 'initial-retransmit-minimum-timeout'), ['int'])),
('initial_retransmit_maximum_timeout', (YLeaf(YType.uint32, 'initial-retransmit-maximum-timeout'), ['int'])),
('timeout_no_user', (YLeaf(YType.uint32, 'timeout-no-user'), ['int'])),
('retransmit_retries', (YLeaf(YType.uint32, 'retransmit-retries'), ['int'])),
('initial_retransmit_retries', (YLeaf(YType.uint32, 'initial-retransmit-retries'), ['int'])),
('is_authentication_enabled', (YLeaf(YType.boolean, 'is-authentication-enabled'), ['bool'])),
('is_hidden', (YLeaf(YType.boolean, 'is-hidden'), ['bool'])),
('is_digest_enabled', (YLeaf(YType.boolean, 'is-digest-enabled'), ['bool'])),
('is_digest_check_enabled', (YLeaf(YType.boolean, 'is-digest-check-enabled'), ['bool'])),
('is_congestion_control_enabled', (YLeaf(YType.boolean, 'is-congestion-control-enabled'), ['bool'])),
('is_peer_address_checked', (YLeaf(YType.boolean, 'is-peer-address-checked'), ['bool'])),
])
self.class_name = None
self.ip_tos = None
self.vrf_name = None
self.receive_window_size = None
self.class_name_xr = None
self.digest_hash = None
self.password = None
self.encoded_password = None
self.host_name = None
self.accounting_method_list = None
self.hello_timeout = None
self.setup_timeout = None
self.retransmit_minimum_timeout = None
self.retransmit_maximum_timeout = None
self.initial_retransmit_minimum_timeout = None
self.initial_retransmit_maximum_timeout = None
self.timeout_no_user = None
self.retransmit_retries = None
self.initial_retransmit_retries = None
self.is_authentication_enabled = None
self.is_hidden = None
self.is_digest_enabled = None
self.is_digest_check_enabled = None
self.is_congestion_control_enabled = None
self.is_peer_address_checked = None
self._segment_path = lambda: "class" + "[class-name='" + str(self.class_name) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Classes.Class, ['class_name', 'ip_tos', 'vrf_name', 'receive_window_size', 'class_name_xr', 'digest_hash', 'password', 'encoded_password', 'host_name', 'accounting_method_list', 'hello_timeout', 'setup_timeout', 'retransmit_minimum_timeout', 'retransmit_maximum_timeout', 'initial_retransmit_minimum_timeout', 'initial_retransmit_maximum_timeout', 'timeout_no_user', 'retransmit_retries', 'initial_retransmit_retries', 'is_authentication_enabled', 'is_hidden', 'is_digest_enabled', 'is_digest_check_enabled', 'is_congestion_control_enabled', 'is_peer_address_checked'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Classes.Class']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Classes']['meta_info']
class Tunnels(_Entity_):
"""
List of tunnel IDs
.. attribute:: tunnel
L2TP tunnel information
**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Tunnels.Tunnel>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Tunnels, self).__init__()
self.yang_name = "tunnels"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("tunnel", ("tunnel", L2tp.Nodes.Node.Tunnels.Tunnel))])
self._leafs = OrderedDict()
self.tunnel = YList(self)
self._segment_path = lambda: "tunnels"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Tunnels, [], name, value)
class Tunnel(_Entity_):
"""
L2TP tunnel information
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: local_address
Local tunnel address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: remote_address
Remote tunnel address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: local_port
Local port
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: remote_port
Remote port
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: protocol
Protocol
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: is_pmtu_enabled
True if tunnel PMTU checking is enabled
**type**\: bool
**config**\: False
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: local_tunnel_name
Local tunnel name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: remote_tunnel_name
Remote tunnel name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: class_name
L2TP class name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: active_sessions
Number of active sessions
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: sequence_ns
Sequence NS
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: sequence_nr
Sequence NR
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: local_window_size
Local window size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: remote_window_size
Remote window size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: retransmission_time
Retransmission time in seconds
**type**\: int
**range:** 0..65535
**config**\: False
**units**\: second
.. attribute:: maximum_retransmission_time
Maximum retransmission time in seconds
**type**\: int
**range:** 0..65535
**config**\: False
**units**\: second
.. attribute:: unsent_queue_size
Unsent queue size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: unsent_maximum_queue_size
Unsent maximum queue size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: resend_queue_size
Resend queue size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: resend_maximum_queue_size
Resend maximum queue size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: order_queue_size
Order queue size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: packet_queue_check
Current number session packet queue check
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: digest_secrets
Control message authentication with digest secrets
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: resends
Total resends
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_acknowledgement_sent
Total zero length body acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_out_of_order_drop_packets
Total out of order dropped packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_out_of_order_reorder_packets
Total out of order reorder packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_peer_authentication_failures
Number of peer authentication failures
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: is_tunnel_up
True if tunnel is up
**type**\: bool
**config**\: False
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled else false
**type**\: bool
**config**\: False
.. attribute:: retransmit_time
Retransmit time distribution in seconds
**type**\: list of :py:class:`RetransmitTime <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Tunnels.Tunnel.RetransmitTime>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Tunnels.Tunnel, self).__init__()
self.yang_name = "tunnel"
self.yang_parent_name = "tunnels"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['local_tunnel_id']
self._child_classes = OrderedDict([("retransmit-time", ("retransmit_time", L2tp.Nodes.Node.Tunnels.Tunnel.RetransmitTime))])
self._leafs = OrderedDict([
('local_tunnel_id', (YLeaf(YType.uint32, 'local-tunnel-id'), ['int'])),
('local_address', (YLeaf(YType.str, 'local-address'), ['str'])),
('remote_address', (YLeaf(YType.str, 'remote-address'), ['str'])),
('local_port', (YLeaf(YType.uint16, 'local-port'), ['int'])),
('remote_port', (YLeaf(YType.uint16, 'remote-port'), ['int'])),
('protocol', (YLeaf(YType.uint8, 'protocol'), ['int'])),
('is_pmtu_enabled', (YLeaf(YType.boolean, 'is-pmtu-enabled'), ['bool'])),
('remote_tunnel_id', (YLeaf(YType.uint32, 'remote-tunnel-id'), ['int'])),
('local_tunnel_name', (YLeaf(YType.str, 'local-tunnel-name'), ['str'])),
('remote_tunnel_name', (YLeaf(YType.str, 'remote-tunnel-name'), ['str'])),
('class_name', (YLeaf(YType.str, 'class-name'), ['str'])),
('active_sessions', (YLeaf(YType.uint32, 'active-sessions'), ['int'])),
('sequence_ns', (YLeaf(YType.uint16, 'sequence-ns'), ['int'])),
('sequence_nr', (YLeaf(YType.uint16, 'sequence-nr'), ['int'])),
('local_window_size', (YLeaf(YType.uint16, 'local-window-size'), ['int'])),
('remote_window_size', (YLeaf(YType.uint16, 'remote-window-size'), ['int'])),
('retransmission_time', (YLeaf(YType.uint16, 'retransmission-time'), ['int'])),
('maximum_retransmission_time', (YLeaf(YType.uint16, 'maximum-retransmission-time'), ['int'])),
('unsent_queue_size', (YLeaf(YType.uint16, 'unsent-queue-size'), ['int'])),
('unsent_maximum_queue_size', (YLeaf(YType.uint16, 'unsent-maximum-queue-size'), ['int'])),
('resend_queue_size', (YLeaf(YType.uint16, 'resend-queue-size'), ['int'])),
('resend_maximum_queue_size', (YLeaf(YType.uint16, 'resend-maximum-queue-size'), ['int'])),
('order_queue_size', (YLeaf(YType.uint16, 'order-queue-size'), ['int'])),
('packet_queue_check', (YLeaf(YType.uint16, 'packet-queue-check'), ['int'])),
('digest_secrets', (YLeaf(YType.uint16, 'digest-secrets'), ['int'])),
('resends', (YLeaf(YType.uint32, 'resends'), ['int'])),
('zero_length_body_acknowledgement_sent', (YLeaf(YType.uint32, 'zero-length-body-acknowledgement-sent'), ['int'])),
('total_out_of_order_drop_packets', (YLeaf(YType.uint32, 'total-out-of-order-drop-packets'), ['int'])),
('total_out_of_order_reorder_packets', (YLeaf(YType.uint32, 'total-out-of-order-reorder-packets'), ['int'])),
('total_peer_authentication_failures', (YLeaf(YType.uint32, 'total-peer-authentication-failures'), ['int'])),
('is_tunnel_up', (YLeaf(YType.boolean, 'is-tunnel-up'), ['bool'])),
('is_congestion_control_enabled', (YLeaf(YType.boolean, 'is-congestion-control-enabled'), ['bool'])),
])
self.local_tunnel_id = None
self.local_address = None
self.remote_address = None
self.local_port = None
self.remote_port = None
self.protocol = None
self.is_pmtu_enabled = None
self.remote_tunnel_id = None
self.local_tunnel_name = None
self.remote_tunnel_name = None
self.class_name = None
self.active_sessions = None
self.sequence_ns = None
self.sequence_nr = None
self.local_window_size = None
self.remote_window_size = None
self.retransmission_time = None
self.maximum_retransmission_time = None
self.unsent_queue_size = None
self.unsent_maximum_queue_size = None
self.resend_queue_size = None
self.resend_maximum_queue_size = None
self.order_queue_size = None
self.packet_queue_check = None
self.digest_secrets = None
self.resends = None
self.zero_length_body_acknowledgement_sent = None
self.total_out_of_order_drop_packets = None
self.total_out_of_order_reorder_packets = None
self.total_peer_authentication_failures = None
self.is_tunnel_up = None
self.is_congestion_control_enabled = None
self.retransmit_time = YList(self)
self._segment_path = lambda: "tunnel" + "[local-tunnel-id='" + str(self.local_tunnel_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Tunnels.Tunnel, ['local_tunnel_id', 'local_address', 'remote_address', 'local_port', 'remote_port', 'protocol', 'is_pmtu_enabled', 'remote_tunnel_id', 'local_tunnel_name', 'remote_tunnel_name', 'class_name', 'active_sessions', 'sequence_ns', 'sequence_nr', 'local_window_size', 'remote_window_size', 'retransmission_time', 'maximum_retransmission_time', 'unsent_queue_size', 'unsent_maximum_queue_size', 'resend_queue_size', 'resend_maximum_queue_size', 'order_queue_size', 'packet_queue_check', 'digest_secrets', 'resends', 'zero_length_body_acknowledgement_sent', 'total_out_of_order_drop_packets', 'total_out_of_order_reorder_packets', 'total_peer_authentication_failures', 'is_tunnel_up', 'is_congestion_control_enabled'], name, value)
class RetransmitTime(_Entity_):
"""
Retransmit time distribution in seconds
.. attribute:: entry
Retransmit time distribution in seconds
**type**\: int
**range:** 0..65535
**config**\: False
**units**\: second
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Tunnels.Tunnel.RetransmitTime, self).__init__()
self.yang_name = "retransmit-time"
self.yang_parent_name = "tunnel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('entry', (YLeaf(YType.uint16, 'entry'), ['int'])),
])
self.entry = None
self._segment_path = lambda: "retransmit-time"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Tunnels.Tunnel.RetransmitTime, ['entry'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Tunnels.Tunnel.RetransmitTime']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Tunnels.Tunnel']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Tunnels']['meta_info']
class Sessions(_Entity_):
"""
List of session IDs
.. attribute:: session
L2TP information for a particular session
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Sessions.Session>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Sessions, self).__init__()
self.yang_name = "sessions"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("session", ("session", L2tp.Nodes.Node.Sessions.Session))])
self._leafs = OrderedDict()
self.session = YList(self)
self._segment_path = lambda: "sessions"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Sessions, [], name, value)
class Session(_Entity_):
"""
L2TP information for a particular session
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: local_session_id (key)
Local session ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: session_application_data
Session application data
**type**\: :py:class:`SessionApplicationData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Sessions.Session.SessionApplicationData>`
**config**\: False
.. attribute:: local_ip_address
Local session IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: remote_ip_address
Remote session IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: l2tp_sh_sess_udp_lport
l2tp sh sess udp lport
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: l2tp_sh_sess_udp_rport
l2tp sh sess udp rport
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: protocol
Protocol
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_serial_number
Call serial number
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: local_tunnel_name
Local tunnel name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: remote_tunnel_name
Remote tunnel name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: remote_session_id
Remote session ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_sess_tie_breaker_enabled
l2tp sh sess tie breaker enabled
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: l2tp_sh_sess_tie_breaker
l2tp sh sess tie breaker
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: is_session_manual
True if session is manual
**type**\: bool
**config**\: False
.. attribute:: is_session_up
True if session is up
**type**\: bool
**config**\: False
.. attribute:: is_udp_checksum_enabled
True if UDP checksum enabled
**type**\: bool
**config**\: False
.. attribute:: is_sequencing_on
True if session sequence is on
**type**\: bool
**config**\: False
.. attribute:: is_session_state_established
True if session state is established
**type**\: bool
**config**\: False
.. attribute:: is_session_locally_initiated
True if session initiated locally
**type**\: bool
**config**\: False
.. attribute:: is_conditional_debug_enabled
True if conditional debugging is enabled
**type**\: bool
**config**\: False
.. attribute:: unique_id
Unique ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: interface_name
Interface name
**type**\: str
**length:** 0..256
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Sessions.Session, self).__init__()
self.yang_name = "session"
self.yang_parent_name = "sessions"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['local_tunnel_id','local_session_id']
self._child_classes = OrderedDict([("session-application-data", ("session_application_data", L2tp.Nodes.Node.Sessions.Session.SessionApplicationData))])
self._leafs = OrderedDict([
('local_tunnel_id', (YLeaf(YType.uint32, 'local-tunnel-id'), ['int'])),
('local_session_id', (YLeaf(YType.uint32, 'local-session-id'), ['int'])),
('local_ip_address', (YLeaf(YType.str, 'local-ip-address'), ['str'])),
('remote_ip_address', (YLeaf(YType.str, 'remote-ip-address'), ['str'])),
('l2tp_sh_sess_udp_lport', (YLeaf(YType.uint16, 'l2tp-sh-sess-udp-lport'), ['int'])),
('l2tp_sh_sess_udp_rport', (YLeaf(YType.uint16, 'l2tp-sh-sess-udp-rport'), ['int'])),
('protocol', (YLeaf(YType.uint8, 'protocol'), ['int'])),
('remote_tunnel_id', (YLeaf(YType.uint32, 'remote-tunnel-id'), ['int'])),
('call_serial_number', (YLeaf(YType.uint32, 'call-serial-number'), ['int'])),
('local_tunnel_name', (YLeaf(YType.str, 'local-tunnel-name'), ['str'])),
('remote_tunnel_name', (YLeaf(YType.str, 'remote-tunnel-name'), ['str'])),
('remote_session_id', (YLeaf(YType.uint32, 'remote-session-id'), ['int'])),
('l2tp_sh_sess_tie_breaker_enabled', (YLeaf(YType.uint8, 'l2tp-sh-sess-tie-breaker-enabled'), ['int'])),
('l2tp_sh_sess_tie_breaker', (YLeaf(YType.uint64, 'l2tp-sh-sess-tie-breaker'), ['int'])),
('is_session_manual', (YLeaf(YType.boolean, 'is-session-manual'), ['bool'])),
('is_session_up', (YLeaf(YType.boolean, 'is-session-up'), ['bool'])),
('is_udp_checksum_enabled', (YLeaf(YType.boolean, 'is-udp-checksum-enabled'), ['bool'])),
('is_sequencing_on', (YLeaf(YType.boolean, 'is-sequencing-on'), ['bool'])),
('is_session_state_established', (YLeaf(YType.boolean, 'is-session-state-established'), ['bool'])),
('is_session_locally_initiated', (YLeaf(YType.boolean, 'is-session-locally-initiated'), ['bool'])),
('is_conditional_debug_enabled', (YLeaf(YType.boolean, 'is-conditional-debug-enabled'), ['bool'])),
('unique_id', (YLeaf(YType.uint32, 'unique-id'), ['int'])),
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.local_tunnel_id = None
self.local_session_id = None
self.local_ip_address = None
self.remote_ip_address = None
self.l2tp_sh_sess_udp_lport = None
self.l2tp_sh_sess_udp_rport = None
self.protocol = None
self.remote_tunnel_id = None
self.call_serial_number = None
self.local_tunnel_name = None
self.remote_tunnel_name = None
self.remote_session_id = None
self.l2tp_sh_sess_tie_breaker_enabled = None
self.l2tp_sh_sess_tie_breaker = None
self.is_session_manual = None
self.is_session_up = None
self.is_udp_checksum_enabled = None
self.is_sequencing_on = None
self.is_session_state_established = None
self.is_session_locally_initiated = None
self.is_conditional_debug_enabled = None
self.unique_id = None
self.interface_name = None
self.session_application_data = L2tp.Nodes.Node.Sessions.Session.SessionApplicationData()
self.session_application_data.parent = self
self._children_name_map["session_application_data"] = "session-application-data"
self._segment_path = lambda: "session" + "[local-tunnel-id='" + str(self.local_tunnel_id) + "']" + "[local-session-id='" + str(self.local_session_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Sessions.Session, ['local_tunnel_id', 'local_session_id', 'local_ip_address', 'remote_ip_address', 'l2tp_sh_sess_udp_lport', 'l2tp_sh_sess_udp_rport', 'protocol', 'remote_tunnel_id', 'call_serial_number', 'local_tunnel_name', 'remote_tunnel_name', 'remote_session_id', 'l2tp_sh_sess_tie_breaker_enabled', 'l2tp_sh_sess_tie_breaker', 'is_session_manual', 'is_session_up', 'is_udp_checksum_enabled', 'is_sequencing_on', 'is_session_state_established', 'is_session_locally_initiated', 'is_conditional_debug_enabled', 'unique_id', 'interface_name'], name, value)
class SessionApplicationData(_Entity_):
"""
Session application data
.. attribute:: xconnect
Xconnect data
**type**\: :py:class:`Xconnect <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect>`
**config**\: False
.. attribute:: vpdn
VPDN data
**type**\: :py:class:`Vpdn <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn>`
**config**\: False
.. attribute:: l2tp_sh_sess_app_type
l2tp sh sess app type
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Sessions.Session.SessionApplicationData, self).__init__()
self.yang_name = "session-application-data"
self.yang_parent_name = "session"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("xconnect", ("xconnect", L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect)), ("vpdn", ("vpdn", L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn))])
self._leafs = OrderedDict([
('l2tp_sh_sess_app_type', (YLeaf(YType.uint32, 'l2tp-sh-sess-app-type'), ['int'])),
])
self.l2tp_sh_sess_app_type = None
self.xconnect = L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect()
self.xconnect.parent = self
self._children_name_map["xconnect"] = "xconnect"
self.vpdn = L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn()
self.vpdn.parent = self
self._children_name_map["vpdn"] = "vpdn"
self._segment_path = lambda: "session-application-data"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Sessions.Session.SessionApplicationData, ['l2tp_sh_sess_app_type'], name, value)
class Xconnect(_Entity_):
"""
Xconnect data
.. attribute:: circuit_name
Circuit name
**type**\: str
**config**\: False
.. attribute:: sessionvc_id
Session VC ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: is_circuit_state_up
True if circuit state is up
**type**\: bool
**config**\: False
.. attribute:: is_local_circuit_state_up
True if local circuit state is up
**type**\: bool
**config**\: False
.. attribute:: is_remote_circuit_state_up
True if remote circuit state is up
**type**\: bool
**config**\: False
.. attribute:: ipv6_protocol_tunneling
IPv6ProtocolTunneling
**type**\: bool
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect, self).__init__()
self.yang_name = "xconnect"
self.yang_parent_name = "session-application-data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('circuit_name', (YLeaf(YType.str, 'circuit-name'), ['str'])),
('sessionvc_id', (YLeaf(YType.uint32, 'sessionvc-id'), ['int'])),
('is_circuit_state_up', (YLeaf(YType.boolean, 'is-circuit-state-up'), ['bool'])),
('is_local_circuit_state_up', (YLeaf(YType.boolean, 'is-local-circuit-state-up'), ['bool'])),
('is_remote_circuit_state_up', (YLeaf(YType.boolean, 'is-remote-circuit-state-up'), ['bool'])),
('ipv6_protocol_tunneling', (YLeaf(YType.boolean, 'ipv6-protocol-tunneling'), ['bool'])),
])
self.circuit_name = None
self.sessionvc_id = None
self.is_circuit_state_up = None
self.is_local_circuit_state_up = None
self.is_remote_circuit_state_up = None
self.ipv6_protocol_tunneling = None
self._segment_path = lambda: "xconnect"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect, ['circuit_name', 'sessionvc_id', 'is_circuit_state_up', 'is_local_circuit_state_up', 'is_remote_circuit_state_up', 'ipv6_protocol_tunneling'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect']['meta_info']
class Vpdn(_Entity_):
"""
VPDN data
.. attribute:: username
Session username
**type**\: str
**config**\: False
.. attribute:: interface_name
Interface name
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn, self).__init__()
self.yang_name = "vpdn"
self.yang_parent_name = "session-application-data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('username', (YLeaf(YType.str, 'username'), ['str'])),
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.username = None
self.interface_name = None
self._segment_path = lambda: "vpdn"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn, ['username', 'interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Sessions.Session.SessionApplicationData']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Sessions.Session']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Sessions']['meta_info']
class Session(_Entity_):
"""
L2TP control messages counters
.. attribute:: unavailable
L2TP session unavailable information
**type**\: :py:class:`Unavailable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Session.Unavailable>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Session, self).__init__()
self.yang_name = "session"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("unavailable", ("unavailable", L2tp.Nodes.Node.Session.Unavailable))])
self._leafs = OrderedDict()
self.unavailable = L2tp.Nodes.Node.Session.Unavailable()
self.unavailable.parent = self
self._children_name_map["unavailable"] = "unavailable"
self._segment_path = lambda: "session"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Session, [], name, value)
class Unavailable(_Entity_):
"""
L2TP session unavailable information
.. attribute:: sessions_on_hold
Number of session ID in hold database
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Session.Unavailable, self).__init__()
self.yang_name = "unavailable"
self.yang_parent_name = "session"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('sessions_on_hold', (YLeaf(YType.uint32, 'sessions-on-hold'), ['int'])),
])
self.sessions_on_hold = None
self._segment_path = lambda: "unavailable"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Session.Unavailable, ['sessions_on_hold'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Session.Unavailable']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Session']['meta_info']
class Internal(_Entity_):
"""
L2TP v2/v3 internal information
.. attribute:: internal_stats
internal stats
**type**\: :py:class:`InternalStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Internal.InternalStats>`
**config**\: False
.. attribute:: internal_stats_last_clear
internal stats last clear
**type**\: :py:class:`InternalStatsLastClear <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tp.Nodes.Node.Internal.InternalStatsLastClear>`
**config**\: False
.. attribute:: time_last_clear
time last clear
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Internal, self).__init__()
self.yang_name = "internal"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("internal-stats", ("internal_stats", L2tp.Nodes.Node.Internal.InternalStats)), ("internal-stats-last-clear", ("internal_stats_last_clear", L2tp.Nodes.Node.Internal.InternalStatsLastClear))])
self._leafs = OrderedDict([
('time_last_clear', (YLeaf(YType.uint32, 'time-last-clear'), ['int'])),
])
self.time_last_clear = None
self.internal_stats = L2tp.Nodes.Node.Internal.InternalStats()
self.internal_stats.parent = self
self._children_name_map["internal_stats"] = "internal-stats"
self.internal_stats_last_clear = L2tp.Nodes.Node.Internal.InternalStatsLastClear()
self.internal_stats_last_clear.parent = self
self._children_name_map["internal_stats_last_clear"] = "internal-stats-last-clear"
self._segment_path = lambda: "internal"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Internal, ['time_last_clear'], name, value)
class InternalStats(_Entity_):
"""
internal stats
.. attribute:: l2tp_sh_l2x_num_tunnels
l2tp sh l2x num tunnels
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_sessions
l2tp sh l2x num sessions
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_rx_high_water_mark
l2tp sh l2x rx high water mark
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_ave_msg_process_usecs
l2tp sh l2x ave msg process usecs
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_msgs
l2tp sh l2x num rx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_msgs
l2tp sh l2x num tx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_err_drops
l2tp sh l2x num tx err drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_conn_drops
l2tp sh l2x num tx conn drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_reordered_msgs
l2tp sh l2x num reordered msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_max_reorder_deviation
l2tp sh l2x max reorder deviation
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_ooo_msgs
l2tp sh l2x num ooo msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_path_drops
l2tp sh l2x num rx path drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_path_data_pkt_drops
l2tp sh l2x num rx path data pkt drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_queue_drops
l2tp sh l2x num rx queue drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_ooo_drops
l2tp sh l2x num rx ooo drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_buffered_msgs
l2tp sh l2x num buffered msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_mutex_block
l2tp sh l2x num mutex block
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_bad_len_drops
l2tp sh l2x num bad len drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_bad_avp_drops
l2tp sh l2x num bad avp drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_missing_cc_id_drops
l2tp sh l2x num missing cc id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_missing_sess_id_drops
l2tp sh l2x num missing sess id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_mismatch_cc_id_drops
l2tp sh l2x num mismatch cc id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_unknown_cc_drops
l2tp sh l2x num unknown cc drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_unknown_sess_drops
l2tp sh l2x num unknown sess drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_linear_id_search
l2tp sh l2x num linear id search
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_linear_id_search_fail
l2tp sh l2x num linear id search fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_netio_pkt_rx
l2tp sh l2x num netio pkt rx
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2tun_ave_msg_process_usecs
l2tp sh l2tun ave msg process usecs
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_sh_l2tun_num_rx_msgs
l2tp sh l2tun num rx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2tun_num_tx_msgs
l2tp sh l2tun num tx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_l2tun_socket_ens_send_error_cnt
l2tp l2tun socket ens send error cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_accept
l2tp l2tun socket session accept
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_destroy
l2tp l2tun socket session destroy
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_connect
l2tp l2tun socket session connect
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_connect_continue
l2tp l2tun socket session connect continue
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_connecting
l2tp l2tun session connecting
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_connected
l2tp l2tun session connected
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_disconnected
l2tp l2tun session disconnected
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_incoming
l2tp l2tun session incoming
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_updated
l2tp l2tun session updated
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_circuit_status
l2tp l2tun session circuit status
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2x_lpts_pa_stats_setup_cnt
l2x lpts pa stats setup cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_destroy_cnt
l2x lpts pa stats destroy cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_alloc_cnt
l2x lpts pa stats alloc cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_alloc_fail_cnt
l2x lpts pa stats alloc fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_init_cnt
l2x lpts pa stats init cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_init_fail_cnt
l2x lpts pa stats init fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_free_cnt
l2x lpts pa stats free cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_pulse_cnt
l2x lpts pa stats pulse cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_pulse_fail_cnt
l2x lpts pa stats pulse fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_cnt
l2x lpts pa stats bind cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_fail_cnt
l2x lpts pa stats bind fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_batch_cnt
l2x lpts pa stats bind batch cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_batch_fail_cnt
l2x lpts pa stats bind batch fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_time
l2x lpts pa stats bind time
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_expire_cnt
l2x lpts pa stats expire cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_cnt
l2x lpts pa stats replay cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_batch_cnt
l2x lpts pa stats replay batch cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_time
l2x lpts pa stats replay time
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Internal.InternalStats, self).__init__()
self.yang_name = "internal-stats"
self.yang_parent_name = "internal"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('l2tp_sh_l2x_num_tunnels', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tunnels'), ['int'])),
('l2tp_sh_l2x_num_sessions', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-sessions'), ['int'])),
('l2tp_sh_l2x_rx_high_water_mark', (YLeaf(YType.uint32, 'l2tp-sh-l2x-rx-high-water-mark'), ['int'])),
('l2tp_sh_l2x_ave_msg_process_usecs', (YLeaf(YType.uint64, 'l2tp-sh-l2x-ave-msg-process-usecs'), ['int'])),
('l2tp_sh_l2x_num_rx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-msgs'), ['int'])),
('l2tp_sh_l2x_num_tx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-msgs'), ['int'])),
('l2tp_sh_l2x_num_tx_err_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-err-drops'), ['int'])),
('l2tp_sh_l2x_num_tx_conn_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-conn-drops'), ['int'])),
('l2tp_sh_l2x_num_reordered_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-reordered-msgs'), ['int'])),
('l2tp_sh_l2x_max_reorder_deviation', (YLeaf(YType.uint32, 'l2tp-sh-l2x-max-reorder-deviation'), ['int'])),
('l2tp_sh_l2x_num_ooo_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-ooo-msgs'), ['int'])),
('l2tp_sh_l2x_num_rx_path_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-path-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_path_data_pkt_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-path-data-pkt-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_queue_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-queue-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_ooo_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-ooo-drops'), ['int'])),
('l2tp_sh_l2x_num_buffered_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-buffered-msgs'), ['int'])),
('l2tp_sh_l2x_num_mutex_block', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-mutex-block'), ['int'])),
('l2tp_sh_l2x_num_bad_len_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-bad-len-drops'), ['int'])),
('l2tp_sh_l2x_num_bad_avp_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-bad-avp-drops'), ['int'])),
('l2tp_sh_l2x_num_missing_cc_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-missing-cc-id-drops'), ['int'])),
('l2tp_sh_l2x_num_missing_sess_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-missing-sess-id-drops'), ['int'])),
('l2tp_sh_l2x_num_mismatch_cc_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-mismatch-cc-id-drops'), ['int'])),
('l2tp_sh_l2x_num_unknown_cc_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-unknown-cc-drops'), ['int'])),
('l2tp_sh_l2x_num_unknown_sess_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-unknown-sess-drops'), ['int'])),
('l2tp_sh_l2x_num_linear_id_search', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-linear-id-search'), ['int'])),
('l2tp_sh_l2x_num_linear_id_search_fail', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-linear-id-search-fail'), ['int'])),
('l2tp_sh_l2x_num_netio_pkt_rx', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-netio-pkt-rx'), ['int'])),
('l2tp_sh_l2tun_ave_msg_process_usecs', (YLeaf(YType.uint64, 'l2tp-sh-l2tun-ave-msg-process-usecs'), ['int'])),
('l2tp_sh_l2tun_num_rx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2tun-num-rx-msgs'), ['int'])),
('l2tp_sh_l2tun_num_tx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2tun-num-tx-msgs'), ['int'])),
('l2tp_l2tun_socket_ens_send_error_cnt', (YLeaf(YType.uint32, 'l2tp-l2tun-socket-ens-send-error-cnt'), ['int'])),
('l2tp_l2tun_socket_session_accept', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-accept'), ['int'])),
('l2tp_l2tun_socket_session_destroy', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-destroy'), ['int'])),
('l2tp_l2tun_socket_session_connect', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-connect'), ['int'])),
('l2tp_l2tun_socket_session_connect_continue', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-connect-continue'), ['int'])),
('l2tp_l2tun_session_connecting', (YLeaf(YType.uint64, 'l2tp-l2tun-session-connecting'), ['int'])),
('l2tp_l2tun_session_connected', (YLeaf(YType.uint64, 'l2tp-l2tun-session-connected'), ['int'])),
('l2tp_l2tun_session_disconnected', (YLeaf(YType.uint64, 'l2tp-l2tun-session-disconnected'), ['int'])),
('l2tp_l2tun_session_incoming', (YLeaf(YType.uint64, 'l2tp-l2tun-session-incoming'), ['int'])),
('l2tp_l2tun_session_updated', (YLeaf(YType.uint64, 'l2tp-l2tun-session-updated'), ['int'])),
('l2tp_l2tun_session_circuit_status', (YLeaf(YType.uint64, 'l2tp-l2tun-session-circuit-status'), ['int'])),
('l2x_lpts_pa_stats_setup_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-setup-cnt'), ['int'])),
('l2x_lpts_pa_stats_destroy_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-destroy-cnt'), ['int'])),
('l2x_lpts_pa_stats_alloc_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-alloc-cnt'), ['int'])),
('l2x_lpts_pa_stats_alloc_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-alloc-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_init_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-init-cnt'), ['int'])),
('l2x_lpts_pa_stats_init_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-init-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_free_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-free-cnt'), ['int'])),
('l2x_lpts_pa_stats_pulse_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-pulse-cnt'), ['int'])),
('l2x_lpts_pa_stats_pulse_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-pulse-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_batch_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-batch-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_batch_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-batch-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_time', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-time'), ['int'])),
('l2x_lpts_pa_stats_expire_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-expire-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_batch_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-batch-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_time', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-time'), ['int'])),
])
self.l2tp_sh_l2x_num_tunnels = None
self.l2tp_sh_l2x_num_sessions = None
self.l2tp_sh_l2x_rx_high_water_mark = None
self.l2tp_sh_l2x_ave_msg_process_usecs = None
self.l2tp_sh_l2x_num_rx_msgs = None
self.l2tp_sh_l2x_num_tx_msgs = None
self.l2tp_sh_l2x_num_tx_err_drops = None
self.l2tp_sh_l2x_num_tx_conn_drops = None
self.l2tp_sh_l2x_num_reordered_msgs = None
self.l2tp_sh_l2x_max_reorder_deviation = None
self.l2tp_sh_l2x_num_ooo_msgs = None
self.l2tp_sh_l2x_num_rx_path_drops = None
self.l2tp_sh_l2x_num_rx_path_data_pkt_drops = None
self.l2tp_sh_l2x_num_rx_queue_drops = None
self.l2tp_sh_l2x_num_rx_ooo_drops = None
self.l2tp_sh_l2x_num_buffered_msgs = None
self.l2tp_sh_l2x_num_mutex_block = None
self.l2tp_sh_l2x_num_bad_len_drops = None
self.l2tp_sh_l2x_num_bad_avp_drops = None
self.l2tp_sh_l2x_num_missing_cc_id_drops = None
self.l2tp_sh_l2x_num_missing_sess_id_drops = None
self.l2tp_sh_l2x_num_mismatch_cc_id_drops = None
self.l2tp_sh_l2x_num_unknown_cc_drops = None
self.l2tp_sh_l2x_num_unknown_sess_drops = None
self.l2tp_sh_l2x_num_linear_id_search = None
self.l2tp_sh_l2x_num_linear_id_search_fail = None
self.l2tp_sh_l2x_num_netio_pkt_rx = None
self.l2tp_sh_l2tun_ave_msg_process_usecs = None
self.l2tp_sh_l2tun_num_rx_msgs = None
self.l2tp_sh_l2tun_num_tx_msgs = None
self.l2tp_l2tun_socket_ens_send_error_cnt = None
self.l2tp_l2tun_socket_session_accept = None
self.l2tp_l2tun_socket_session_destroy = None
self.l2tp_l2tun_socket_session_connect = None
self.l2tp_l2tun_socket_session_connect_continue = None
self.l2tp_l2tun_session_connecting = None
self.l2tp_l2tun_session_connected = None
self.l2tp_l2tun_session_disconnected = None
self.l2tp_l2tun_session_incoming = None
self.l2tp_l2tun_session_updated = None
self.l2tp_l2tun_session_circuit_status = None
self.l2x_lpts_pa_stats_setup_cnt = None
self.l2x_lpts_pa_stats_destroy_cnt = None
self.l2x_lpts_pa_stats_alloc_cnt = None
self.l2x_lpts_pa_stats_alloc_fail_cnt = None
self.l2x_lpts_pa_stats_init_cnt = None
self.l2x_lpts_pa_stats_init_fail_cnt = None
self.l2x_lpts_pa_stats_free_cnt = None
self.l2x_lpts_pa_stats_pulse_cnt = None
self.l2x_lpts_pa_stats_pulse_fail_cnt = None
self.l2x_lpts_pa_stats_bind_cnt = None
self.l2x_lpts_pa_stats_bind_fail_cnt = None
self.l2x_lpts_pa_stats_bind_batch_cnt = None
self.l2x_lpts_pa_stats_bind_batch_fail_cnt = None
self.l2x_lpts_pa_stats_bind_time = None
self.l2x_lpts_pa_stats_expire_cnt = None
self.l2x_lpts_pa_stats_replay_cnt = None
self.l2x_lpts_pa_stats_replay_batch_cnt = None
self.l2x_lpts_pa_stats_replay_time = None
self._segment_path = lambda: "internal-stats"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Internal.InternalStats, ['l2tp_sh_l2x_num_tunnels', 'l2tp_sh_l2x_num_sessions', 'l2tp_sh_l2x_rx_high_water_mark', 'l2tp_sh_l2x_ave_msg_process_usecs', 'l2tp_sh_l2x_num_rx_msgs', 'l2tp_sh_l2x_num_tx_msgs', 'l2tp_sh_l2x_num_tx_err_drops', 'l2tp_sh_l2x_num_tx_conn_drops', 'l2tp_sh_l2x_num_reordered_msgs', 'l2tp_sh_l2x_max_reorder_deviation', 'l2tp_sh_l2x_num_ooo_msgs', 'l2tp_sh_l2x_num_rx_path_drops', 'l2tp_sh_l2x_num_rx_path_data_pkt_drops', 'l2tp_sh_l2x_num_rx_queue_drops', 'l2tp_sh_l2x_num_rx_ooo_drops', 'l2tp_sh_l2x_num_buffered_msgs', 'l2tp_sh_l2x_num_mutex_block', 'l2tp_sh_l2x_num_bad_len_drops', 'l2tp_sh_l2x_num_bad_avp_drops', 'l2tp_sh_l2x_num_missing_cc_id_drops', 'l2tp_sh_l2x_num_missing_sess_id_drops', 'l2tp_sh_l2x_num_mismatch_cc_id_drops', 'l2tp_sh_l2x_num_unknown_cc_drops', 'l2tp_sh_l2x_num_unknown_sess_drops', 'l2tp_sh_l2x_num_linear_id_search', 'l2tp_sh_l2x_num_linear_id_search_fail', 'l2tp_sh_l2x_num_netio_pkt_rx', 'l2tp_sh_l2tun_ave_msg_process_usecs', 'l2tp_sh_l2tun_num_rx_msgs', 'l2tp_sh_l2tun_num_tx_msgs', 'l2tp_l2tun_socket_ens_send_error_cnt', 'l2tp_l2tun_socket_session_accept', 'l2tp_l2tun_socket_session_destroy', 'l2tp_l2tun_socket_session_connect', 'l2tp_l2tun_socket_session_connect_continue', 'l2tp_l2tun_session_connecting', 'l2tp_l2tun_session_connected', 'l2tp_l2tun_session_disconnected', 'l2tp_l2tun_session_incoming', 'l2tp_l2tun_session_updated', 'l2tp_l2tun_session_circuit_status', 'l2x_lpts_pa_stats_setup_cnt', 'l2x_lpts_pa_stats_destroy_cnt', 'l2x_lpts_pa_stats_alloc_cnt', 'l2x_lpts_pa_stats_alloc_fail_cnt', 'l2x_lpts_pa_stats_init_cnt', 'l2x_lpts_pa_stats_init_fail_cnt', 'l2x_lpts_pa_stats_free_cnt', 'l2x_lpts_pa_stats_pulse_cnt', 'l2x_lpts_pa_stats_pulse_fail_cnt', 'l2x_lpts_pa_stats_bind_cnt', 'l2x_lpts_pa_stats_bind_fail_cnt', 'l2x_lpts_pa_stats_bind_batch_cnt', 'l2x_lpts_pa_stats_bind_batch_fail_cnt', 'l2x_lpts_pa_stats_bind_time', 'l2x_lpts_pa_stats_expire_cnt', 'l2x_lpts_pa_stats_replay_cnt', 'l2x_lpts_pa_stats_replay_batch_cnt', 'l2x_lpts_pa_stats_replay_time'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Internal.InternalStats']['meta_info']
class InternalStatsLastClear(_Entity_):
"""
internal stats last clear
.. attribute:: l2tp_sh_l2x_num_tunnels
l2tp sh l2x num tunnels
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_sessions
l2tp sh l2x num sessions
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_rx_high_water_mark
l2tp sh l2x rx high water mark
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_ave_msg_process_usecs
l2tp sh l2x ave msg process usecs
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_msgs
l2tp sh l2x num rx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_msgs
l2tp sh l2x num tx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_err_drops
l2tp sh l2x num tx err drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_conn_drops
l2tp sh l2x num tx conn drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_reordered_msgs
l2tp sh l2x num reordered msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_max_reorder_deviation
l2tp sh l2x max reorder deviation
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_ooo_msgs
l2tp sh l2x num ooo msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_path_drops
l2tp sh l2x num rx path drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_path_data_pkt_drops
l2tp sh l2x num rx path data pkt drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_queue_drops
l2tp sh l2x num rx queue drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_ooo_drops
l2tp sh l2x num rx ooo drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_buffered_msgs
l2tp sh l2x num buffered msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_mutex_block
l2tp sh l2x num mutex block
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_bad_len_drops
l2tp sh l2x num bad len drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_bad_avp_drops
l2tp sh l2x num bad avp drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_missing_cc_id_drops
l2tp sh l2x num missing cc id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_missing_sess_id_drops
l2tp sh l2x num missing sess id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_mismatch_cc_id_drops
l2tp sh l2x num mismatch cc id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_unknown_cc_drops
l2tp sh l2x num unknown cc drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_unknown_sess_drops
l2tp sh l2x num unknown sess drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_linear_id_search
l2tp sh l2x num linear id search
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_linear_id_search_fail
l2tp sh l2x num linear id search fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_netio_pkt_rx
l2tp sh l2x num netio pkt rx
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2tun_ave_msg_process_usecs
l2tp sh l2tun ave msg process usecs
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_sh_l2tun_num_rx_msgs
l2tp sh l2tun num rx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2tun_num_tx_msgs
l2tp sh l2tun num tx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_l2tun_socket_ens_send_error_cnt
l2tp l2tun socket ens send error cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_accept
l2tp l2tun socket session accept
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_destroy
l2tp l2tun socket session destroy
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_connect
l2tp l2tun socket session connect
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_connect_continue
l2tp l2tun socket session connect continue
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_connecting
l2tp l2tun session connecting
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_connected
l2tp l2tun session connected
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_disconnected
l2tp l2tun session disconnected
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_incoming
l2tp l2tun session incoming
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_updated
l2tp l2tun session updated
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_circuit_status
l2tp l2tun session circuit status
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2x_lpts_pa_stats_setup_cnt
l2x lpts pa stats setup cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_destroy_cnt
l2x lpts pa stats destroy cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_alloc_cnt
l2x lpts pa stats alloc cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_alloc_fail_cnt
l2x lpts pa stats alloc fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_init_cnt
l2x lpts pa stats init cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_init_fail_cnt
l2x lpts pa stats init fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_free_cnt
l2x lpts pa stats free cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_pulse_cnt
l2x lpts pa stats pulse cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_pulse_fail_cnt
l2x lpts pa stats pulse fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_cnt
l2x lpts pa stats bind cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_fail_cnt
l2x lpts pa stats bind fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_batch_cnt
l2x lpts pa stats bind batch cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_batch_fail_cnt
l2x lpts pa stats bind batch fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_time
l2x lpts pa stats bind time
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_expire_cnt
l2x lpts pa stats expire cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_cnt
l2x lpts pa stats replay cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_batch_cnt
l2x lpts pa stats replay batch cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_time
l2x lpts pa stats replay time
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tp.Nodes.Node.Internal.InternalStatsLastClear, self).__init__()
self.yang_name = "internal-stats-last-clear"
self.yang_parent_name = "internal"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('l2tp_sh_l2x_num_tunnels', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tunnels'), ['int'])),
('l2tp_sh_l2x_num_sessions', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-sessions'), ['int'])),
('l2tp_sh_l2x_rx_high_water_mark', (YLeaf(YType.uint32, 'l2tp-sh-l2x-rx-high-water-mark'), ['int'])),
('l2tp_sh_l2x_ave_msg_process_usecs', (YLeaf(YType.uint64, 'l2tp-sh-l2x-ave-msg-process-usecs'), ['int'])),
('l2tp_sh_l2x_num_rx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-msgs'), ['int'])),
('l2tp_sh_l2x_num_tx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-msgs'), ['int'])),
('l2tp_sh_l2x_num_tx_err_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-err-drops'), ['int'])),
('l2tp_sh_l2x_num_tx_conn_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-conn-drops'), ['int'])),
('l2tp_sh_l2x_num_reordered_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-reordered-msgs'), ['int'])),
('l2tp_sh_l2x_max_reorder_deviation', (YLeaf(YType.uint32, 'l2tp-sh-l2x-max-reorder-deviation'), ['int'])),
('l2tp_sh_l2x_num_ooo_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-ooo-msgs'), ['int'])),
('l2tp_sh_l2x_num_rx_path_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-path-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_path_data_pkt_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-path-data-pkt-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_queue_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-queue-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_ooo_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-ooo-drops'), ['int'])),
('l2tp_sh_l2x_num_buffered_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-buffered-msgs'), ['int'])),
('l2tp_sh_l2x_num_mutex_block', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-mutex-block'), ['int'])),
('l2tp_sh_l2x_num_bad_len_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-bad-len-drops'), ['int'])),
('l2tp_sh_l2x_num_bad_avp_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-bad-avp-drops'), ['int'])),
('l2tp_sh_l2x_num_missing_cc_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-missing-cc-id-drops'), ['int'])),
('l2tp_sh_l2x_num_missing_sess_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-missing-sess-id-drops'), ['int'])),
('l2tp_sh_l2x_num_mismatch_cc_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-mismatch-cc-id-drops'), ['int'])),
('l2tp_sh_l2x_num_unknown_cc_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-unknown-cc-drops'), ['int'])),
('l2tp_sh_l2x_num_unknown_sess_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-unknown-sess-drops'), ['int'])),
('l2tp_sh_l2x_num_linear_id_search', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-linear-id-search'), ['int'])),
('l2tp_sh_l2x_num_linear_id_search_fail', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-linear-id-search-fail'), ['int'])),
('l2tp_sh_l2x_num_netio_pkt_rx', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-netio-pkt-rx'), ['int'])),
('l2tp_sh_l2tun_ave_msg_process_usecs', (YLeaf(YType.uint64, 'l2tp-sh-l2tun-ave-msg-process-usecs'), ['int'])),
('l2tp_sh_l2tun_num_rx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2tun-num-rx-msgs'), ['int'])),
('l2tp_sh_l2tun_num_tx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2tun-num-tx-msgs'), ['int'])),
('l2tp_l2tun_socket_ens_send_error_cnt', (YLeaf(YType.uint32, 'l2tp-l2tun-socket-ens-send-error-cnt'), ['int'])),
('l2tp_l2tun_socket_session_accept', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-accept'), ['int'])),
('l2tp_l2tun_socket_session_destroy', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-destroy'), ['int'])),
('l2tp_l2tun_socket_session_connect', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-connect'), ['int'])),
('l2tp_l2tun_socket_session_connect_continue', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-connect-continue'), ['int'])),
('l2tp_l2tun_session_connecting', (YLeaf(YType.uint64, 'l2tp-l2tun-session-connecting'), ['int'])),
('l2tp_l2tun_session_connected', (YLeaf(YType.uint64, 'l2tp-l2tun-session-connected'), ['int'])),
('l2tp_l2tun_session_disconnected', (YLeaf(YType.uint64, 'l2tp-l2tun-session-disconnected'), ['int'])),
('l2tp_l2tun_session_incoming', (YLeaf(YType.uint64, 'l2tp-l2tun-session-incoming'), ['int'])),
('l2tp_l2tun_session_updated', (YLeaf(YType.uint64, 'l2tp-l2tun-session-updated'), ['int'])),
('l2tp_l2tun_session_circuit_status', (YLeaf(YType.uint64, 'l2tp-l2tun-session-circuit-status'), ['int'])),
('l2x_lpts_pa_stats_setup_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-setup-cnt'), ['int'])),
('l2x_lpts_pa_stats_destroy_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-destroy-cnt'), ['int'])),
('l2x_lpts_pa_stats_alloc_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-alloc-cnt'), ['int'])),
('l2x_lpts_pa_stats_alloc_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-alloc-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_init_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-init-cnt'), ['int'])),
('l2x_lpts_pa_stats_init_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-init-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_free_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-free-cnt'), ['int'])),
('l2x_lpts_pa_stats_pulse_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-pulse-cnt'), ['int'])),
('l2x_lpts_pa_stats_pulse_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-pulse-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_batch_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-batch-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_batch_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-batch-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_time', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-time'), ['int'])),
('l2x_lpts_pa_stats_expire_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-expire-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_batch_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-batch-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_time', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-time'), ['int'])),
])
self.l2tp_sh_l2x_num_tunnels = None
self.l2tp_sh_l2x_num_sessions = None
self.l2tp_sh_l2x_rx_high_water_mark = None
self.l2tp_sh_l2x_ave_msg_process_usecs = None
self.l2tp_sh_l2x_num_rx_msgs = None
self.l2tp_sh_l2x_num_tx_msgs = None
self.l2tp_sh_l2x_num_tx_err_drops = None
self.l2tp_sh_l2x_num_tx_conn_drops = None
self.l2tp_sh_l2x_num_reordered_msgs = None
self.l2tp_sh_l2x_max_reorder_deviation = None
self.l2tp_sh_l2x_num_ooo_msgs = None
self.l2tp_sh_l2x_num_rx_path_drops = None
self.l2tp_sh_l2x_num_rx_path_data_pkt_drops = None
self.l2tp_sh_l2x_num_rx_queue_drops = None
self.l2tp_sh_l2x_num_rx_ooo_drops = None
self.l2tp_sh_l2x_num_buffered_msgs = None
self.l2tp_sh_l2x_num_mutex_block = None
self.l2tp_sh_l2x_num_bad_len_drops = None
self.l2tp_sh_l2x_num_bad_avp_drops = None
self.l2tp_sh_l2x_num_missing_cc_id_drops = None
self.l2tp_sh_l2x_num_missing_sess_id_drops = None
self.l2tp_sh_l2x_num_mismatch_cc_id_drops = None
self.l2tp_sh_l2x_num_unknown_cc_drops = None
self.l2tp_sh_l2x_num_unknown_sess_drops = None
self.l2tp_sh_l2x_num_linear_id_search = None
self.l2tp_sh_l2x_num_linear_id_search_fail = None
self.l2tp_sh_l2x_num_netio_pkt_rx = None
self.l2tp_sh_l2tun_ave_msg_process_usecs = None
self.l2tp_sh_l2tun_num_rx_msgs = None
self.l2tp_sh_l2tun_num_tx_msgs = None
self.l2tp_l2tun_socket_ens_send_error_cnt = None
self.l2tp_l2tun_socket_session_accept = None
self.l2tp_l2tun_socket_session_destroy = None
self.l2tp_l2tun_socket_session_connect = None
self.l2tp_l2tun_socket_session_connect_continue = None
self.l2tp_l2tun_session_connecting = None
self.l2tp_l2tun_session_connected = None
self.l2tp_l2tun_session_disconnected = None
self.l2tp_l2tun_session_incoming = None
self.l2tp_l2tun_session_updated = None
self.l2tp_l2tun_session_circuit_status = None
self.l2x_lpts_pa_stats_setup_cnt = None
self.l2x_lpts_pa_stats_destroy_cnt = None
self.l2x_lpts_pa_stats_alloc_cnt = None
self.l2x_lpts_pa_stats_alloc_fail_cnt = None
self.l2x_lpts_pa_stats_init_cnt = None
self.l2x_lpts_pa_stats_init_fail_cnt = None
self.l2x_lpts_pa_stats_free_cnt = None
self.l2x_lpts_pa_stats_pulse_cnt = None
self.l2x_lpts_pa_stats_pulse_fail_cnt = None
self.l2x_lpts_pa_stats_bind_cnt = None
self.l2x_lpts_pa_stats_bind_fail_cnt = None
self.l2x_lpts_pa_stats_bind_batch_cnt = None
self.l2x_lpts_pa_stats_bind_batch_fail_cnt = None
self.l2x_lpts_pa_stats_bind_time = None
self.l2x_lpts_pa_stats_expire_cnt = None
self.l2x_lpts_pa_stats_replay_cnt = None
self.l2x_lpts_pa_stats_replay_batch_cnt = None
self.l2x_lpts_pa_stats_replay_time = None
self._segment_path = lambda: "internal-stats-last-clear"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tp.Nodes.Node.Internal.InternalStatsLastClear, ['l2tp_sh_l2x_num_tunnels', 'l2tp_sh_l2x_num_sessions', 'l2tp_sh_l2x_rx_high_water_mark', 'l2tp_sh_l2x_ave_msg_process_usecs', 'l2tp_sh_l2x_num_rx_msgs', 'l2tp_sh_l2x_num_tx_msgs', 'l2tp_sh_l2x_num_tx_err_drops', 'l2tp_sh_l2x_num_tx_conn_drops', 'l2tp_sh_l2x_num_reordered_msgs', 'l2tp_sh_l2x_max_reorder_deviation', 'l2tp_sh_l2x_num_ooo_msgs', 'l2tp_sh_l2x_num_rx_path_drops', 'l2tp_sh_l2x_num_rx_path_data_pkt_drops', 'l2tp_sh_l2x_num_rx_queue_drops', 'l2tp_sh_l2x_num_rx_ooo_drops', 'l2tp_sh_l2x_num_buffered_msgs', 'l2tp_sh_l2x_num_mutex_block', 'l2tp_sh_l2x_num_bad_len_drops', 'l2tp_sh_l2x_num_bad_avp_drops', 'l2tp_sh_l2x_num_missing_cc_id_drops', 'l2tp_sh_l2x_num_missing_sess_id_drops', 'l2tp_sh_l2x_num_mismatch_cc_id_drops', 'l2tp_sh_l2x_num_unknown_cc_drops', 'l2tp_sh_l2x_num_unknown_sess_drops', 'l2tp_sh_l2x_num_linear_id_search', 'l2tp_sh_l2x_num_linear_id_search_fail', 'l2tp_sh_l2x_num_netio_pkt_rx', 'l2tp_sh_l2tun_ave_msg_process_usecs', 'l2tp_sh_l2tun_num_rx_msgs', 'l2tp_sh_l2tun_num_tx_msgs', 'l2tp_l2tun_socket_ens_send_error_cnt', 'l2tp_l2tun_socket_session_accept', 'l2tp_l2tun_socket_session_destroy', 'l2tp_l2tun_socket_session_connect', 'l2tp_l2tun_socket_session_connect_continue', 'l2tp_l2tun_session_connecting', 'l2tp_l2tun_session_connected', 'l2tp_l2tun_session_disconnected', 'l2tp_l2tun_session_incoming', 'l2tp_l2tun_session_updated', 'l2tp_l2tun_session_circuit_status', 'l2x_lpts_pa_stats_setup_cnt', 'l2x_lpts_pa_stats_destroy_cnt', 'l2x_lpts_pa_stats_alloc_cnt', 'l2x_lpts_pa_stats_alloc_fail_cnt', 'l2x_lpts_pa_stats_init_cnt', 'l2x_lpts_pa_stats_init_fail_cnt', 'l2x_lpts_pa_stats_free_cnt', 'l2x_lpts_pa_stats_pulse_cnt', 'l2x_lpts_pa_stats_pulse_fail_cnt', 'l2x_lpts_pa_stats_bind_cnt', 'l2x_lpts_pa_stats_bind_fail_cnt', 'l2x_lpts_pa_stats_bind_batch_cnt', 'l2x_lpts_pa_stats_bind_batch_fail_cnt', 'l2x_lpts_pa_stats_bind_time', 'l2x_lpts_pa_stats_expire_cnt', 'l2x_lpts_pa_stats_replay_cnt', 'l2x_lpts_pa_stats_replay_batch_cnt', 'l2x_lpts_pa_stats_replay_time'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Internal.InternalStatsLastClear']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node.Internal']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes.Node']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp.Nodes']['meta_info']
def clone_ptr(self):
self._top_entity = L2tp()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tp']['meta_info']
class L2tpv2(_Entity_):
"""
l2tpv2
.. attribute:: nodes
List of nodes for which subscriber data is collected
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2, self).__init__()
self._top_entity = None
self.yang_name = "l2tpv2"
self.yang_parent_name = "Cisco-IOS-XR-tunnel-l2tun-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("nodes", ("nodes", L2tpv2.Nodes))])
self._leafs = OrderedDict()
self.nodes = L2tpv2.Nodes()
self.nodes.parent = self
self._children_name_map["nodes"] = "nodes"
self._segment_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2, [], name, value)
class Nodes(_Entity_):
"""
List of nodes for which subscriber data is
collected
.. attribute:: node
Subscriber data for a particular node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes, self).__init__()
self.yang_name = "nodes"
self.yang_parent_name = "l2tpv2"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("node", ("node", L2tpv2.Nodes.Node))])
self._leafs = OrderedDict()
self.node = YList(self)
self._segment_path = lambda: "nodes"
self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes, [], name, value)
class Node(_Entity_):
"""
Subscriber data for a particular node
.. attribute:: node_name (key)
Node name
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
**config**\: False
.. attribute:: counters
L2TP control messages counters
**type**\: :py:class:`Counters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters>`
**config**\: False
.. attribute:: statistics
L2TP v2 statistics information
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Statistics>`
**config**\: False
.. attribute:: tunnel
L2TPv2 tunnel
**type**\: :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Tunnel>`
**config**\: False
.. attribute:: tunnel_configurations
List of tunnel IDs
**type**\: :py:class:`TunnelConfigurations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.TunnelConfigurations>`
**config**\: False
.. attribute:: counter_hist_fail
Failure events leading to disconnection
**type**\: :py:class:`CounterHistFail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.CounterHistFail>`
**config**\: False
.. attribute:: classes
List of L2TP class names
**type**\: :py:class:`Classes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Classes>`
**config**\: False
.. attribute:: tunnels
List of tunnel IDs
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Tunnels>`
**config**\: False
.. attribute:: sessions
List of session IDs
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Sessions>`
**config**\: False
.. attribute:: session
L2TP control messages counters
**type**\: :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Session>`
**config**\: False
.. attribute:: internal
L2TP v2/v3 internal information
**type**\: :py:class:`Internal <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Internal>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node, self).__init__()
self.yang_name = "node"
self.yang_parent_name = "nodes"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['node_name']
self._child_classes = OrderedDict([("counters", ("counters", L2tpv2.Nodes.Node.Counters)), ("statistics", ("statistics", L2tpv2.Nodes.Node.Statistics)), ("tunnel", ("tunnel", L2tpv2.Nodes.Node.Tunnel)), ("tunnel-configurations", ("tunnel_configurations", L2tpv2.Nodes.Node.TunnelConfigurations)), ("counter-hist-fail", ("counter_hist_fail", L2tpv2.Nodes.Node.CounterHistFail)), ("classes", ("classes", L2tpv2.Nodes.Node.Classes)), ("tunnels", ("tunnels", L2tpv2.Nodes.Node.Tunnels)), ("sessions", ("sessions", L2tpv2.Nodes.Node.Sessions)), ("session", ("session", L2tpv2.Nodes.Node.Session)), ("internal", ("internal", L2tpv2.Nodes.Node.Internal))])
self._leafs = OrderedDict([
('node_name', (YLeaf(YType.str, 'node-name'), ['str'])),
])
self.node_name = None
self.counters = L2tpv2.Nodes.Node.Counters()
self.counters.parent = self
self._children_name_map["counters"] = "counters"
self.statistics = L2tpv2.Nodes.Node.Statistics()
self.statistics.parent = self
self._children_name_map["statistics"] = "statistics"
self.tunnel = L2tpv2.Nodes.Node.Tunnel()
self.tunnel.parent = self
self._children_name_map["tunnel"] = "tunnel"
self.tunnel_configurations = L2tpv2.Nodes.Node.TunnelConfigurations()
self.tunnel_configurations.parent = self
self._children_name_map["tunnel_configurations"] = "tunnel-configurations"
self.counter_hist_fail = L2tpv2.Nodes.Node.CounterHistFail()
self.counter_hist_fail.parent = self
self._children_name_map["counter_hist_fail"] = "counter-hist-fail"
self.classes = L2tpv2.Nodes.Node.Classes()
self.classes.parent = self
self._children_name_map["classes"] = "classes"
self.tunnels = L2tpv2.Nodes.Node.Tunnels()
self.tunnels.parent = self
self._children_name_map["tunnels"] = "tunnels"
self.sessions = L2tpv2.Nodes.Node.Sessions()
self.sessions.parent = self
self._children_name_map["sessions"] = "sessions"
self.session = L2tpv2.Nodes.Node.Session()
self.session.parent = self
self._children_name_map["session"] = "session"
self.internal = L2tpv2.Nodes.Node.Internal()
self.internal.parent = self
self._children_name_map["internal"] = "internal"
self._segment_path = lambda: "node" + "[node-name='" + str(self.node_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/nodes/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node, ['node_name'], name, value)
class Counters(_Entity_):
"""
L2TP control messages counters
.. attribute:: forwarding
L2TP forwarding messages counters
**type**\: :py:class:`Forwarding <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Forwarding>`
**config**\: False
.. attribute:: control
L2TP control messages counters
**type**\: :py:class:`Control <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters, self).__init__()
self.yang_name = "counters"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("forwarding", ("forwarding", L2tpv2.Nodes.Node.Counters.Forwarding)), ("control", ("control", L2tpv2.Nodes.Node.Counters.Control))])
self._leafs = OrderedDict()
self.forwarding = L2tpv2.Nodes.Node.Counters.Forwarding()
self.forwarding.parent = self
self._children_name_map["forwarding"] = "forwarding"
self.control = L2tpv2.Nodes.Node.Counters.Control()
self.control.parent = self
self._children_name_map["control"] = "control"
self._segment_path = lambda: "counters"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters, [], name, value)
class Forwarding(_Entity_):
"""
L2TP forwarding messages counters
.. attribute:: sessions
List of class and session IDs
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Forwarding.Sessions>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Forwarding, self).__init__()
self.yang_name = "forwarding"
self.yang_parent_name = "counters"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("sessions", ("sessions", L2tpv2.Nodes.Node.Counters.Forwarding.Sessions))])
self._leafs = OrderedDict()
self.sessions = L2tpv2.Nodes.Node.Counters.Forwarding.Sessions()
self.sessions.parent = self
self._children_name_map["sessions"] = "sessions"
self._segment_path = lambda: "forwarding"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Forwarding, [], name, value)
class Sessions(_Entity_):
"""
List of class and session IDs
.. attribute:: session
L2TP information for a particular session
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Forwarding.Sessions.Session>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Forwarding.Sessions, self).__init__()
self.yang_name = "sessions"
self.yang_parent_name = "forwarding"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("session", ("session", L2tpv2.Nodes.Node.Counters.Forwarding.Sessions.Session))])
self._leafs = OrderedDict()
self.session = YList(self)
self._segment_path = lambda: "sessions"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Forwarding.Sessions, [], name, value)
class Session(_Entity_):
"""
L2TP information for a particular session
.. attribute:: tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: session_id (key)
Local session ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: remote_session_id
Remote session ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: in_packets
Number of packets sent in
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: out_packets
Number of packets sent out
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: in_bytes
Number of bytes sent in
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
.. attribute:: out_bytes
Number of bytes sent out
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Forwarding.Sessions.Session, self).__init__()
self.yang_name = "session"
self.yang_parent_name = "sessions"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['tunnel_id','session_id']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('tunnel_id', (YLeaf(YType.uint32, 'tunnel-id'), ['int'])),
('session_id', (YLeaf(YType.uint32, 'session-id'), ['int'])),
('remote_session_id', (YLeaf(YType.uint32, 'remote-session-id'), ['int'])),
('in_packets', (YLeaf(YType.uint64, 'in-packets'), ['int'])),
('out_packets', (YLeaf(YType.uint64, 'out-packets'), ['int'])),
('in_bytes', (YLeaf(YType.uint64, 'in-bytes'), ['int'])),
('out_bytes', (YLeaf(YType.uint64, 'out-bytes'), ['int'])),
])
self.tunnel_id = None
self.session_id = None
self.remote_session_id = None
self.in_packets = None
self.out_packets = None
self.in_bytes = None
self.out_bytes = None
self._segment_path = lambda: "session" + "[tunnel-id='" + str(self.tunnel_id) + "']" + "[session-id='" + str(self.session_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Forwarding.Sessions.Session, ['tunnel_id', 'session_id', 'remote_session_id', 'in_packets', 'out_packets', 'in_bytes', 'out_bytes'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Forwarding.Sessions.Session']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Forwarding.Sessions']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Forwarding']['meta_info']
class Control(_Entity_):
"""
L2TP control messages counters
.. attribute:: tunnel_xr
L2TP control tunnel messages counters
**type**\: :py:class:`TunnelXr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr>`
**config**\: False
.. attribute:: tunnels
Table of tunnel IDs of control message counters
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.Tunnels>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control, self).__init__()
self.yang_name = "control"
self.yang_parent_name = "counters"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("tunnel-xr", ("tunnel_xr", L2tpv2.Nodes.Node.Counters.Control.TunnelXr)), ("tunnels", ("tunnels", L2tpv2.Nodes.Node.Counters.Control.Tunnels))])
self._leafs = OrderedDict()
self.tunnel_xr = L2tpv2.Nodes.Node.Counters.Control.TunnelXr()
self.tunnel_xr.parent = self
self._children_name_map["tunnel_xr"] = "tunnel-xr"
self.tunnels = L2tpv2.Nodes.Node.Counters.Control.Tunnels()
self.tunnels.parent = self
self._children_name_map["tunnels"] = "tunnels"
self._segment_path = lambda: "control"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control, [], name, value)
class TunnelXr(_Entity_):
"""
L2TP control tunnel messages counters
.. attribute:: authentication
Tunnel authentication counters
**type**\: :py:class:`Authentication <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication>`
**config**\: False
.. attribute:: global_
Tunnel counters
**type**\: :py:class:`Global <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr, self).__init__()
self.yang_name = "tunnel-xr"
self.yang_parent_name = "control"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("authentication", ("authentication", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication)), ("global", ("global_", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global))])
self._leafs = OrderedDict()
self.authentication = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication()
self.authentication.parent = self
self._children_name_map["authentication"] = "authentication"
self.global_ = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global()
self.global_.parent = self
self._children_name_map["global_"] = "global"
self._segment_path = lambda: "tunnel-xr"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr, [], name, value)
class Authentication(_Entity_):
"""
Tunnel authentication counters
.. attribute:: nonce_avp
Nonce AVP statistics
**type**\: :py:class:`NonceAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp>`
**config**\: False
.. attribute:: common_digest
Common digest statistics
**type**\: :py:class:`CommonDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest>`
**config**\: False
.. attribute:: primary_digest
Primary digest statistics
**type**\: :py:class:`PrimaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest>`
**config**\: False
.. attribute:: secondary_digest
Secondary digest statistics
**type**\: :py:class:`SecondaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest>`
**config**\: False
.. attribute:: integrity_check
Integrity check statistics
**type**\: :py:class:`IntegrityCheck <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck>`
**config**\: False
.. attribute:: local_secret
Local secret statistics
**type**\: :py:class:`LocalSecret <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret>`
**config**\: False
.. attribute:: challenge_avp
Challenge AVP statistics
**type**\: :py:class:`ChallengeAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp>`
**config**\: False
.. attribute:: challenge_reponse
Challenge response statistics
**type**\: :py:class:`ChallengeReponse <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse>`
**config**\: False
.. attribute:: overall_statistics
Overall statistics
**type**\: :py:class:`OverallStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication, self).__init__()
self.yang_name = "authentication"
self.yang_parent_name = "tunnel-xr"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("nonce-avp", ("nonce_avp", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp)), ("common-digest", ("common_digest", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest)), ("primary-digest", ("primary_digest", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest)), ("secondary-digest", ("secondary_digest", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest)), ("integrity-check", ("integrity_check", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck)), ("local-secret", ("local_secret", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret)), ("challenge-avp", ("challenge_avp", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp)), ("challenge-reponse", ("challenge_reponse", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse)), ("overall-statistics", ("overall_statistics", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics))])
self._leafs = OrderedDict()
self.nonce_avp = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp()
self.nonce_avp.parent = self
self._children_name_map["nonce_avp"] = "nonce-avp"
self.common_digest = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest()
self.common_digest.parent = self
self._children_name_map["common_digest"] = "common-digest"
self.primary_digest = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest()
self.primary_digest.parent = self
self._children_name_map["primary_digest"] = "primary-digest"
self.secondary_digest = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest()
self.secondary_digest.parent = self
self._children_name_map["secondary_digest"] = "secondary-digest"
self.integrity_check = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck()
self.integrity_check.parent = self
self._children_name_map["integrity_check"] = "integrity-check"
self.local_secret = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret()
self.local_secret.parent = self
self._children_name_map["local_secret"] = "local-secret"
self.challenge_avp = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp()
self.challenge_avp.parent = self
self._children_name_map["challenge_avp"] = "challenge-avp"
self.challenge_reponse = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse()
self.challenge_reponse.parent = self
self._children_name_map["challenge_reponse"] = "challenge-reponse"
self.overall_statistics = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics()
self.overall_statistics.parent = self
self._children_name_map["overall_statistics"] = "overall-statistics"
self._segment_path = lambda: "authentication"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication, [], name, value)
class NonceAvp(_Entity_):
"""
Nonce AVP statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp, self).__init__()
self.yang_name = "nonce-avp"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "nonce-avp"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.NonceAvp']['meta_info']
class CommonDigest(_Entity_):
"""
Common digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest, self).__init__()
self.yang_name = "common-digest"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "common-digest"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.CommonDigest']['meta_info']
class PrimaryDigest(_Entity_):
"""
Primary digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest, self).__init__()
self.yang_name = "primary-digest"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "primary-digest"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.PrimaryDigest']['meta_info']
class SecondaryDigest(_Entity_):
"""
Secondary digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest, self).__init__()
self.yang_name = "secondary-digest"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "secondary-digest"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.SecondaryDigest']['meta_info']
class IntegrityCheck(_Entity_):
"""
Integrity check statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck, self).__init__()
self.yang_name = "integrity-check"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "integrity-check"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.IntegrityCheck']['meta_info']
class LocalSecret(_Entity_):
"""
Local secret statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret, self).__init__()
self.yang_name = "local-secret"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "local-secret"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.LocalSecret']['meta_info']
class ChallengeAvp(_Entity_):
"""
Challenge AVP statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp, self).__init__()
self.yang_name = "challenge-avp"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "challenge-avp"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeAvp']['meta_info']
class ChallengeReponse(_Entity_):
"""
Challenge response statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse, self).__init__()
self.yang_name = "challenge-reponse"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "challenge-reponse"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.ChallengeReponse']['meta_info']
class OverallStatistics(_Entity_):
"""
Overall statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics, self).__init__()
self.yang_name = "overall-statistics"
self.yang_parent_name = "authentication"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('validate', (YLeaf(YType.uint32, 'validate'), ['int'])),
('bad_hash', (YLeaf(YType.uint32, 'bad-hash'), ['int'])),
('bad_length', (YLeaf(YType.uint32, 'bad-length'), ['int'])),
('ignored', (YLeaf(YType.uint32, 'ignored'), ['int'])),
('missing', (YLeaf(YType.uint32, 'missing'), ['int'])),
('passed', (YLeaf(YType.uint32, 'passed'), ['int'])),
('failed', (YLeaf(YType.uint32, 'failed'), ['int'])),
('skipped', (YLeaf(YType.uint32, 'skipped'), ['int'])),
('generate_response_failures', (YLeaf(YType.uint32, 'generate-response-failures'), ['int'])),
('unexpected', (YLeaf(YType.uint32, 'unexpected'), ['int'])),
('unexpected_zlb', (YLeaf(YType.uint32, 'unexpected-zlb'), ['int'])),
])
self.validate = None
self.bad_hash = None
self.bad_length = None
self.ignored = None
self.missing = None
self.passed = None
self.failed = None
self.skipped = None
self.generate_response_failures = None
self.unexpected = None
self.unexpected_zlb = None
self._segment_path = lambda: "overall-statistics"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics, ['validate', 'bad_hash', 'bad_length', 'ignored', 'missing', 'passed', 'failed', 'skipped', 'generate_response_failures', 'unexpected', 'unexpected_zlb'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication.OverallStatistics']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Authentication']['meta_info']
class Global(_Entity_):
"""
Tunnel counters
.. attribute:: transmit
Transmit data
**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit>`
**config**\: False
.. attribute:: retransmit
Re transmit data
**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit>`
**config**\: False
.. attribute:: received
Received data
**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Received>`
**config**\: False
.. attribute:: drop
Drop data
**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Drop>`
**config**\: False
.. attribute:: total_transmit
Total transmit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_retransmit
Total retransmit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_received
Total received
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_drop
Total drop
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global, self).__init__()
self.yang_name = "global"
self.yang_parent_name = "tunnel-xr"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("transmit", ("transmit", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit)), ("retransmit", ("retransmit", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit)), ("received", ("received", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Received)), ("drop", ("drop", L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Drop))])
self._leafs = OrderedDict([
('total_transmit', (YLeaf(YType.uint32, 'total-transmit'), ['int'])),
('total_retransmit', (YLeaf(YType.uint32, 'total-retransmit'), ['int'])),
('total_received', (YLeaf(YType.uint32, 'total-received'), ['int'])),
('total_drop', (YLeaf(YType.uint32, 'total-drop'), ['int'])),
])
self.total_transmit = None
self.total_retransmit = None
self.total_received = None
self.total_drop = None
self.transmit = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit()
self.transmit.parent = self
self._children_name_map["transmit"] = "transmit"
self.retransmit = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit()
self.retransmit.parent = self
self._children_name_map["retransmit"] = "retransmit"
self.received = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Received()
self.received.parent = self
self._children_name_map["received"] = "received"
self.drop = L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Drop()
self.drop.parent = self
self._children_name_map["drop"] = "drop"
self._segment_path = lambda: "global"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global, ['total_transmit', 'total_retransmit', 'total_received', 'total_drop'], name, value)
class Transmit(_Entity_):
"""
Transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit, self).__init__()
self.yang_name = "transmit"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "transmit"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Transmit']['meta_info']
class Retransmit(_Entity_):
"""
Re transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit, self).__init__()
self.yang_name = "retransmit"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "retransmit"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Retransmit']['meta_info']
class Received(_Entity_):
"""
Received data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Received, self).__init__()
self.yang_name = "received"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "received"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Received, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Received']['meta_info']
class Drop(_Entity_):
"""
Drop data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Drop, self).__init__()
self.yang_name = "drop"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "drop"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Drop, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global.Drop']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr.Global']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.TunnelXr']['meta_info']
class Tunnels(_Entity_):
"""
Table of tunnel IDs of control message counters
.. attribute:: tunnel
L2TP tunnel control message counters
**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.Tunnels, self).__init__()
self.yang_name = "tunnels"
self.yang_parent_name = "control"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("tunnel", ("tunnel", L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel))])
self._leafs = OrderedDict()
self.tunnel = YList(self)
self._segment_path = lambda: "tunnels"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.Tunnels, [], name, value)
class Tunnel(_Entity_):
"""
L2TP tunnel control message counters
.. attribute:: tunnel_id (key)
L2TP tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: brief
L2TP control message local and remote addresses
**type**\: :py:class:`Brief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief>`
**config**\: False
.. attribute:: global_
Global data
**type**\: :py:class:`Global <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel, self).__init__()
self.yang_name = "tunnel"
self.yang_parent_name = "tunnels"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['tunnel_id']
self._child_classes = OrderedDict([("brief", ("brief", L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief)), ("global", ("global_", L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global))])
self._leafs = OrderedDict([
('tunnel_id', (YLeaf(YType.uint32, 'tunnel-id'), ['int'])),
])
self.tunnel_id = None
self.brief = L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief()
self.brief.parent = self
self._children_name_map["brief"] = "brief"
self.global_ = L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global()
self.global_.parent = self
self._children_name_map["global_"] = "global"
self._segment_path = lambda: "tunnel" + "[tunnel-id='" + str(self.tunnel_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel, ['tunnel_id'], name, value)
class Brief(_Entity_):
"""
L2TP control message local and remote addresses
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: local_address
Local IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: remote_address
Remote IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief, self).__init__()
self.yang_name = "brief"
self.yang_parent_name = "tunnel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('remote_tunnel_id', (YLeaf(YType.uint32, 'remote-tunnel-id'), ['int'])),
('local_address', (YLeaf(YType.str, 'local-address'), ['str'])),
('remote_address', (YLeaf(YType.str, 'remote-address'), ['str'])),
])
self.remote_tunnel_id = None
self.local_address = None
self.remote_address = None
self._segment_path = lambda: "brief"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief, ['remote_tunnel_id', 'local_address', 'remote_address'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Brief']['meta_info']
class Global(_Entity_):
"""
Global data
.. attribute:: transmit
Transmit data
**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit>`
**config**\: False
.. attribute:: retransmit
Re transmit data
**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit>`
**config**\: False
.. attribute:: received
Received data
**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received>`
**config**\: False
.. attribute:: drop
Drop data
**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop>`
**config**\: False
.. attribute:: total_transmit
Total transmit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_retransmit
Total retransmit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_received
Total received
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_drop
Total drop
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global, self).__init__()
self.yang_name = "global"
self.yang_parent_name = "tunnel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("transmit", ("transmit", L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit)), ("retransmit", ("retransmit", L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit)), ("received", ("received", L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received)), ("drop", ("drop", L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop))])
self._leafs = OrderedDict([
('total_transmit', (YLeaf(YType.uint32, 'total-transmit'), ['int'])),
('total_retransmit', (YLeaf(YType.uint32, 'total-retransmit'), ['int'])),
('total_received', (YLeaf(YType.uint32, 'total-received'), ['int'])),
('total_drop', (YLeaf(YType.uint32, 'total-drop'), ['int'])),
])
self.total_transmit = None
self.total_retransmit = None
self.total_received = None
self.total_drop = None
self.transmit = L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit()
self.transmit.parent = self
self._children_name_map["transmit"] = "transmit"
self.retransmit = L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit()
self.retransmit.parent = self
self._children_name_map["retransmit"] = "retransmit"
self.received = L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received()
self.received.parent = self
self._children_name_map["received"] = "received"
self.drop = L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop()
self.drop.parent = self
self._children_name_map["drop"] = "drop"
self._segment_path = lambda: "global"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global, ['total_transmit', 'total_retransmit', 'total_received', 'total_drop'], name, value)
class Transmit(_Entity_):
"""
Transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit, self).__init__()
self.yang_name = "transmit"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "transmit"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Transmit']['meta_info']
class Retransmit(_Entity_):
"""
Re transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit, self).__init__()
self.yang_name = "retransmit"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "retransmit"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Retransmit']['meta_info']
class Received(_Entity_):
"""
Received data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received, self).__init__()
self.yang_name = "received"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "received"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Received']['meta_info']
class Drop(_Entity_):
"""
Drop data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop, self).__init__()
self.yang_name = "drop"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', (YLeaf(YType.uint32, 'unknown-packets'), ['int'])),
('zero_length_body_packets', (YLeaf(YType.uint32, 'zero-length-body-packets'), ['int'])),
('start_control_connection_requests', (YLeaf(YType.uint32, 'start-control-connection-requests'), ['int'])),
('start_control_connection_replies', (YLeaf(YType.uint32, 'start-control-connection-replies'), ['int'])),
('start_control_connection_notifications', (YLeaf(YType.uint32, 'start-control-connection-notifications'), ['int'])),
('stop_control_connection_notifications', (YLeaf(YType.uint32, 'stop-control-connection-notifications'), ['int'])),
('hello_packets', (YLeaf(YType.uint32, 'hello-packets'), ['int'])),
('outgoing_call_requests', (YLeaf(YType.uint32, 'outgoing-call-requests'), ['int'])),
('outgoing_call_replies', (YLeaf(YType.uint32, 'outgoing-call-replies'), ['int'])),
('outgoing_call_connected_packets', (YLeaf(YType.uint32, 'outgoing-call-connected-packets'), ['int'])),
('incoming_call_requests', (YLeaf(YType.uint32, 'incoming-call-requests'), ['int'])),
('incoming_call_replies', (YLeaf(YType.uint32, 'incoming-call-replies'), ['int'])),
('incoming_call_connected_packets', (YLeaf(YType.uint32, 'incoming-call-connected-packets'), ['int'])),
('call_disconnect_notify_packets', (YLeaf(YType.uint32, 'call-disconnect-notify-packets'), ['int'])),
('wan_error_notify_packets', (YLeaf(YType.uint32, 'wan-error-notify-packets'), ['int'])),
('set_link_info_packets', (YLeaf(YType.uint32, 'set-link-info-packets'), ['int'])),
('service_relay_requests', (YLeaf(YType.uint32, 'service-relay-requests'), ['int'])),
('service_relay_replies', (YLeaf(YType.uint32, 'service-relay-replies'), ['int'])),
('acknowledgement_packets', (YLeaf(YType.uint32, 'acknowledgement-packets'), ['int'])),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "drop"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global.Drop']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel.Global']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.Tunnels.Tunnel']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control.Tunnels']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters.Control']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Counters']['meta_info']
class Statistics(_Entity_):
"""
L2TP v2 statistics information
.. attribute:: tunnels
Number of tunnels
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: sessions
Number of sessions
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: sent_packets
Number of packets sent
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: received_packets
Number of packets received
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: average_packet_processing_time
Average processing time for received packets (in micro seconds)
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: microsecond
.. attribute:: received_out_of_order_packets
Out of order packets received
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: reorder_packets
Re order packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: reorder_deviation_packets
Re order deviation
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: incoming_dropped_packets
In coming packets dropped
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: buffered_packets
Bufferred packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: netio_packets
Packets RX in netio
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Statistics, self).__init__()
self.yang_name = "statistics"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('tunnels', (YLeaf(YType.uint32, 'tunnels'), ['int'])),
('sessions', (YLeaf(YType.uint32, 'sessions'), ['int'])),
('sent_packets', (YLeaf(YType.uint32, 'sent-packets'), ['int'])),
('received_packets', (YLeaf(YType.uint32, 'received-packets'), ['int'])),
('average_packet_processing_time', (YLeaf(YType.uint32, 'average-packet-processing-time'), ['int'])),
('received_out_of_order_packets', (YLeaf(YType.uint32, 'received-out-of-order-packets'), ['int'])),
('reorder_packets', (YLeaf(YType.uint32, 'reorder-packets'), ['int'])),
('reorder_deviation_packets', (YLeaf(YType.uint32, 'reorder-deviation-packets'), ['int'])),
('incoming_dropped_packets', (YLeaf(YType.uint32, 'incoming-dropped-packets'), ['int'])),
('buffered_packets', (YLeaf(YType.uint32, 'buffered-packets'), ['int'])),
('netio_packets', (YLeaf(YType.uint32, 'netio-packets'), ['int'])),
])
self.tunnels = None
self.sessions = None
self.sent_packets = None
self.received_packets = None
self.average_packet_processing_time = None
self.received_out_of_order_packets = None
self.reorder_packets = None
self.reorder_deviation_packets = None
self.incoming_dropped_packets = None
self.buffered_packets = None
self.netio_packets = None
self._segment_path = lambda: "statistics"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Statistics, ['tunnels', 'sessions', 'sent_packets', 'received_packets', 'average_packet_processing_time', 'received_out_of_order_packets', 'reorder_packets', 'reorder_deviation_packets', 'incoming_dropped_packets', 'buffered_packets', 'netio_packets'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Statistics']['meta_info']
class Tunnel(_Entity_):
"""
L2TPv2 tunnel
.. attribute:: accounting
Tunnel accounting counters
**type**\: :py:class:`Accounting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Tunnel.Accounting>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Tunnel, self).__init__()
self.yang_name = "tunnel"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("accounting", ("accounting", L2tpv2.Nodes.Node.Tunnel.Accounting))])
self._leafs = OrderedDict()
self.accounting = L2tpv2.Nodes.Node.Tunnel.Accounting()
self.accounting.parent = self
self._children_name_map["accounting"] = "accounting"
self._segment_path = lambda: "tunnel"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Tunnel, [], name, value)
class Accounting(_Entity_):
"""
Tunnel accounting counters
.. attribute:: statistics
Tunnel accounting statistics
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Tunnel.Accounting.Statistics>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Tunnel.Accounting, self).__init__()
self.yang_name = "accounting"
self.yang_parent_name = "tunnel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("statistics", ("statistics", L2tpv2.Nodes.Node.Tunnel.Accounting.Statistics))])
self._leafs = OrderedDict()
self.statistics = L2tpv2.Nodes.Node.Tunnel.Accounting.Statistics()
self.statistics.parent = self
self._children_name_map["statistics"] = "statistics"
self._segment_path = lambda: "accounting"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Tunnel.Accounting, [], name, value)
class Statistics(_Entity_):
"""
Tunnel accounting statistics
.. attribute:: records_sent_successfully
Accounting records sent successfully
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: start
Accounting start
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: stop
Accounting stop
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: reject
Accounting reject
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: transport_failures
Transport failures
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: positive_acknowledgement
Positive acknowledgement
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: negative_acknowledgement
Negative acknowledgement
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: records_checkpointed
Total records checkpointed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: records_failed_to_checkpoint
Records fail to checkpoint
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: records_sent_from_queue
Records sent from queue
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: memory_failures
Memory failures
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: current_size
Current checkpoint size
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: records_recovered_from_checkpoint
Records recovered from checkpoint
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: records_fail_to_recover
Records fail to recover
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: queue_statistics_size
Queue statistics size
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Tunnel.Accounting.Statistics, self).__init__()
self.yang_name = "statistics"
self.yang_parent_name = "accounting"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('records_sent_successfully', (YLeaf(YType.uint64, 'records-sent-successfully'), ['int'])),
('start', (YLeaf(YType.uint64, 'start'), ['int'])),
('stop', (YLeaf(YType.uint64, 'stop'), ['int'])),
('reject', (YLeaf(YType.uint64, 'reject'), ['int'])),
('transport_failures', (YLeaf(YType.uint64, 'transport-failures'), ['int'])),
('positive_acknowledgement', (YLeaf(YType.uint64, 'positive-acknowledgement'), ['int'])),
('negative_acknowledgement', (YLeaf(YType.uint64, 'negative-acknowledgement'), ['int'])),
('records_checkpointed', (YLeaf(YType.uint64, 'records-checkpointed'), ['int'])),
('records_failed_to_checkpoint', (YLeaf(YType.uint64, 'records-failed-to-checkpoint'), ['int'])),
('records_sent_from_queue', (YLeaf(YType.uint64, 'records-sent-from-queue'), ['int'])),
('memory_failures', (YLeaf(YType.uint32, 'memory-failures'), ['int'])),
('current_size', (YLeaf(YType.uint32, 'current-size'), ['int'])),
('records_recovered_from_checkpoint', (YLeaf(YType.uint32, 'records-recovered-from-checkpoint'), ['int'])),
('records_fail_to_recover', (YLeaf(YType.uint32, 'records-fail-to-recover'), ['int'])),
('queue_statistics_size', (YLeaf(YType.int32, 'queue-statistics-size'), ['int'])),
])
self.records_sent_successfully = None
self.start = None
self.stop = None
self.reject = None
self.transport_failures = None
self.positive_acknowledgement = None
self.negative_acknowledgement = None
self.records_checkpointed = None
self.records_failed_to_checkpoint = None
self.records_sent_from_queue = None
self.memory_failures = None
self.current_size = None
self.records_recovered_from_checkpoint = None
self.records_fail_to_recover = None
self.queue_statistics_size = None
self._segment_path = lambda: "statistics"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Tunnel.Accounting.Statistics, ['records_sent_successfully', 'start', 'stop', 'reject', 'transport_failures', 'positive_acknowledgement', 'negative_acknowledgement', 'records_checkpointed', 'records_failed_to_checkpoint', 'records_sent_from_queue', 'memory_failures', 'current_size', 'records_recovered_from_checkpoint', 'records_fail_to_recover', 'queue_statistics_size'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Tunnel.Accounting.Statistics']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Tunnel.Accounting']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Tunnel']['meta_info']
class TunnelConfigurations(_Entity_):
"""
List of tunnel IDs
.. attribute:: tunnel_configuration
L2TP tunnel information
**type**\: list of :py:class:`TunnelConfiguration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.TunnelConfigurations, self).__init__()
self.yang_name = "tunnel-configurations"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("tunnel-configuration", ("tunnel_configuration", L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration))])
self._leafs = OrderedDict()
self.tunnel_configuration = YList(self)
self._segment_path = lambda: "tunnel-configurations"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.TunnelConfigurations, [], name, value)
class TunnelConfiguration(_Entity_):
"""
L2TP tunnel information
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_class
L2Tp class data
**type**\: :py:class:`L2tpClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass>`
**config**\: False
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration, self).__init__()
self.yang_name = "tunnel-configuration"
self.yang_parent_name = "tunnel-configurations"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['local_tunnel_id']
self._child_classes = OrderedDict([("l2tp-class", ("l2tp_class", L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass))])
self._leafs = OrderedDict([
('local_tunnel_id', (YLeaf(YType.uint32, 'local-tunnel-id'), ['int'])),
('remote_tunnel_id', (YLeaf(YType.uint32, 'remote-tunnel-id'), ['int'])),
])
self.local_tunnel_id = None
self.remote_tunnel_id = None
self.l2tp_class = L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass()
self.l2tp_class.parent = self
self._children_name_map["l2tp_class"] = "l2tp-class"
self._segment_path = lambda: "tunnel-configuration" + "[local-tunnel-id='" + str(self.local_tunnel_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration, ['local_tunnel_id', 'remote_tunnel_id'], name, value)
class L2tpClass(_Entity_):
"""
L2Tp class data
.. attribute:: ip_tos
IP TOS
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: vrf_name
VRF name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: receive_window_size
Receive window size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: class_name_xr
Class name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: digest_hash
Hash configured as MD5 or SHA1
**type**\: :py:class:`DigestHash <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.DigestHash>`
**config**\: False
.. attribute:: password
Password
**type**\: str
**length:** 0..25
**config**\: False
.. attribute:: encoded_password
Encoded password
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: host_name
Host name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: accounting_method_list
Accounting List
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: hello_timeout
Hello timeout value in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: setup_timeout
Timeout setup value in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: retransmit_minimum_timeout
Retransmit minimum timeout in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: retransmit_maximum_timeout
Retransmit maximum timeout in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: initial_retransmit_minimum_timeout
Initial timeout minimum in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: initial_retransmit_maximum_timeout
Initial timeout maximum in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: timeout_no_user
Timeout no user
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: retransmit_retries
Retransmit retries
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: initial_retransmit_retries
Initial retransmit retries
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: is_authentication_enabled
True if authentication is enabled
**type**\: bool
**config**\: False
.. attribute:: is_hidden
True if class is hidden
**type**\: bool
**config**\: False
.. attribute:: is_digest_enabled
True if digest authentication is enabled
**type**\: bool
**config**\: False
.. attribute:: is_digest_check_enabled
True if digest check is enabled
**type**\: bool
**config**\: False
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled
**type**\: bool
**config**\: False
.. attribute:: is_peer_address_checked
True if peer address is checked
**type**\: bool
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass, self).__init__()
self.yang_name = "l2tp-class"
self.yang_parent_name = "tunnel-configuration"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ip_tos', (YLeaf(YType.uint8, 'ip-tos'), ['int'])),
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
('receive_window_size', (YLeaf(YType.uint16, 'receive-window-size'), ['int'])),
('class_name_xr', (YLeaf(YType.str, 'class-name-xr'), ['str'])),
('digest_hash', (YLeaf(YType.enumeration, 'digest-hash'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper', 'DigestHash', '')])),
('password', (YLeaf(YType.str, 'password'), ['str'])),
('encoded_password', (YLeaf(YType.str, 'encoded-password'), ['str'])),
('host_name', (YLeaf(YType.str, 'host-name'), ['str'])),
('accounting_method_list', (YLeaf(YType.str, 'accounting-method-list'), ['str'])),
('hello_timeout', (YLeaf(YType.uint32, 'hello-timeout'), ['int'])),
('setup_timeout', (YLeaf(YType.uint32, 'setup-timeout'), ['int'])),
('retransmit_minimum_timeout', (YLeaf(YType.uint32, 'retransmit-minimum-timeout'), ['int'])),
('retransmit_maximum_timeout', (YLeaf(YType.uint32, 'retransmit-maximum-timeout'), ['int'])),
('initial_retransmit_minimum_timeout', (YLeaf(YType.uint32, 'initial-retransmit-minimum-timeout'), ['int'])),
('initial_retransmit_maximum_timeout', (YLeaf(YType.uint32, 'initial-retransmit-maximum-timeout'), ['int'])),
('timeout_no_user', (YLeaf(YType.uint32, 'timeout-no-user'), ['int'])),
('retransmit_retries', (YLeaf(YType.uint32, 'retransmit-retries'), ['int'])),
('initial_retransmit_retries', (YLeaf(YType.uint32, 'initial-retransmit-retries'), ['int'])),
('is_authentication_enabled', (YLeaf(YType.boolean, 'is-authentication-enabled'), ['bool'])),
('is_hidden', (YLeaf(YType.boolean, 'is-hidden'), ['bool'])),
('is_digest_enabled', (YLeaf(YType.boolean, 'is-digest-enabled'), ['bool'])),
('is_digest_check_enabled', (YLeaf(YType.boolean, 'is-digest-check-enabled'), ['bool'])),
('is_congestion_control_enabled', (YLeaf(YType.boolean, 'is-congestion-control-enabled'), ['bool'])),
('is_peer_address_checked', (YLeaf(YType.boolean, 'is-peer-address-checked'), ['bool'])),
])
self.ip_tos = None
self.vrf_name = None
self.receive_window_size = None
self.class_name_xr = None
self.digest_hash = None
self.password = None
self.encoded_password = None
self.host_name = None
self.accounting_method_list = None
self.hello_timeout = None
self.setup_timeout = None
self.retransmit_minimum_timeout = None
self.retransmit_maximum_timeout = None
self.initial_retransmit_minimum_timeout = None
self.initial_retransmit_maximum_timeout = None
self.timeout_no_user = None
self.retransmit_retries = None
self.initial_retransmit_retries = None
self.is_authentication_enabled = None
self.is_hidden = None
self.is_digest_enabled = None
self.is_digest_check_enabled = None
self.is_congestion_control_enabled = None
self.is_peer_address_checked = None
self._segment_path = lambda: "l2tp-class"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass, ['ip_tos', 'vrf_name', 'receive_window_size', 'class_name_xr', 'digest_hash', 'password', 'encoded_password', 'host_name', 'accounting_method_list', 'hello_timeout', 'setup_timeout', 'retransmit_minimum_timeout', 'retransmit_maximum_timeout', 'initial_retransmit_minimum_timeout', 'initial_retransmit_maximum_timeout', 'timeout_no_user', 'retransmit_retries', 'initial_retransmit_retries', 'is_authentication_enabled', 'is_hidden', 'is_digest_enabled', 'is_digest_check_enabled', 'is_congestion_control_enabled', 'is_peer_address_checked'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration.L2tpClass']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.TunnelConfigurations.TunnelConfiguration']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.TunnelConfigurations']['meta_info']
class CounterHistFail(_Entity_):
"""
Failure events leading to disconnection
.. attribute:: sess_down_tmout
sesions affected due to timeout
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: tx_counters
Send side counters
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
**config**\: False
.. attribute:: rx_counters
Receive side counters
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
**config**\: False
.. attribute:: pkt_timeout
timeout events by packet
**type**\: list of :py:class:`PktTimeout <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.CounterHistFail.PktTimeout>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.CounterHistFail, self).__init__()
self.yang_name = "counter-hist-fail"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("pkt-timeout", ("pkt_timeout", L2tpv2.Nodes.Node.CounterHistFail.PktTimeout))])
self._leafs = OrderedDict([
('sess_down_tmout', (YLeaf(YType.uint32, 'sess-down-tmout'), ['int'])),
('tx_counters', (YLeaf(YType.str, 'tx-counters'), ['str'])),
('rx_counters', (YLeaf(YType.str, 'rx-counters'), ['str'])),
])
self.sess_down_tmout = None
self.tx_counters = None
self.rx_counters = None
self.pkt_timeout = YList(self)
self._segment_path = lambda: "counter-hist-fail"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.CounterHistFail, ['sess_down_tmout', 'tx_counters', 'rx_counters'], name, value)
class PktTimeout(_Entity_):
"""
timeout events by packet
.. attribute:: entry
timeout events by packet
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.CounterHistFail.PktTimeout, self).__init__()
self.yang_name = "pkt-timeout"
self.yang_parent_name = "counter-hist-fail"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('entry', (YLeaf(YType.uint32, 'entry'), ['int'])),
])
self.entry = None
self._segment_path = lambda: "pkt-timeout"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.CounterHistFail.PktTimeout, ['entry'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.CounterHistFail.PktTimeout']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.CounterHistFail']['meta_info']
class Classes(_Entity_):
"""
List of L2TP class names
.. attribute:: class_
L2TP class name
**type**\: list of :py:class:`Class <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Classes.Class>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Classes, self).__init__()
self.yang_name = "classes"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("class", ("class_", L2tpv2.Nodes.Node.Classes.Class))])
self._leafs = OrderedDict()
self.class_ = YList(self)
self._segment_path = lambda: "classes"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Classes, [], name, value)
class Class(_Entity_):
"""
L2TP class name
.. attribute:: class_name (key)
L2TP class name
**type**\: str
**length:** 1..31
**config**\: False
.. attribute:: ip_tos
IP TOS
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: vrf_name
VRF name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: receive_window_size
Receive window size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: class_name_xr
Class name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: digest_hash
Hash configured as MD5 or SHA1
**type**\: :py:class:`DigestHash <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.DigestHash>`
**config**\: False
.. attribute:: password
Password
**type**\: str
**length:** 0..25
**config**\: False
.. attribute:: encoded_password
Encoded password
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: host_name
Host name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: accounting_method_list
Accounting List
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: hello_timeout
Hello timeout value in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: setup_timeout
Timeout setup value in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: retransmit_minimum_timeout
Retransmit minimum timeout in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: retransmit_maximum_timeout
Retransmit maximum timeout in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: initial_retransmit_minimum_timeout
Initial timeout minimum in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: initial_retransmit_maximum_timeout
Initial timeout maximum in seconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: timeout_no_user
Timeout no user
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: retransmit_retries
Retransmit retries
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: initial_retransmit_retries
Initial retransmit retries
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: is_authentication_enabled
True if authentication is enabled
**type**\: bool
**config**\: False
.. attribute:: is_hidden
True if class is hidden
**type**\: bool
**config**\: False
.. attribute:: is_digest_enabled
True if digest authentication is enabled
**type**\: bool
**config**\: False
.. attribute:: is_digest_check_enabled
True if digest check is enabled
**type**\: bool
**config**\: False
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled
**type**\: bool
**config**\: False
.. attribute:: is_peer_address_checked
True if peer address is checked
**type**\: bool
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Classes.Class, self).__init__()
self.yang_name = "class"
self.yang_parent_name = "classes"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['class_name']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('class_name', (YLeaf(YType.str, 'class-name'), ['str'])),
('ip_tos', (YLeaf(YType.uint8, 'ip-tos'), ['int'])),
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
('receive_window_size', (YLeaf(YType.uint16, 'receive-window-size'), ['int'])),
('class_name_xr', (YLeaf(YType.str, 'class-name-xr'), ['str'])),
('digest_hash', (YLeaf(YType.enumeration, 'digest-hash'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper', 'DigestHash', '')])),
('password', (YLeaf(YType.str, 'password'), ['str'])),
('encoded_password', (YLeaf(YType.str, 'encoded-password'), ['str'])),
('host_name', (YLeaf(YType.str, 'host-name'), ['str'])),
('accounting_method_list', (YLeaf(YType.str, 'accounting-method-list'), ['str'])),
('hello_timeout', (YLeaf(YType.uint32, 'hello-timeout'), ['int'])),
('setup_timeout', (YLeaf(YType.uint32, 'setup-timeout'), ['int'])),
('retransmit_minimum_timeout', (YLeaf(YType.uint32, 'retransmit-minimum-timeout'), ['int'])),
('retransmit_maximum_timeout', (YLeaf(YType.uint32, 'retransmit-maximum-timeout'), ['int'])),
('initial_retransmit_minimum_timeout', (YLeaf(YType.uint32, 'initial-retransmit-minimum-timeout'), ['int'])),
('initial_retransmit_maximum_timeout', (YLeaf(YType.uint32, 'initial-retransmit-maximum-timeout'), ['int'])),
('timeout_no_user', (YLeaf(YType.uint32, 'timeout-no-user'), ['int'])),
('retransmit_retries', (YLeaf(YType.uint32, 'retransmit-retries'), ['int'])),
('initial_retransmit_retries', (YLeaf(YType.uint32, 'initial-retransmit-retries'), ['int'])),
('is_authentication_enabled', (YLeaf(YType.boolean, 'is-authentication-enabled'), ['bool'])),
('is_hidden', (YLeaf(YType.boolean, 'is-hidden'), ['bool'])),
('is_digest_enabled', (YLeaf(YType.boolean, 'is-digest-enabled'), ['bool'])),
('is_digest_check_enabled', (YLeaf(YType.boolean, 'is-digest-check-enabled'), ['bool'])),
('is_congestion_control_enabled', (YLeaf(YType.boolean, 'is-congestion-control-enabled'), ['bool'])),
('is_peer_address_checked', (YLeaf(YType.boolean, 'is-peer-address-checked'), ['bool'])),
])
self.class_name = None
self.ip_tos = None
self.vrf_name = None
self.receive_window_size = None
self.class_name_xr = None
self.digest_hash = None
self.password = None
self.encoded_password = None
self.host_name = None
self.accounting_method_list = None
self.hello_timeout = None
self.setup_timeout = None
self.retransmit_minimum_timeout = None
self.retransmit_maximum_timeout = None
self.initial_retransmit_minimum_timeout = None
self.initial_retransmit_maximum_timeout = None
self.timeout_no_user = None
self.retransmit_retries = None
self.initial_retransmit_retries = None
self.is_authentication_enabled = None
self.is_hidden = None
self.is_digest_enabled = None
self.is_digest_check_enabled = None
self.is_congestion_control_enabled = None
self.is_peer_address_checked = None
self._segment_path = lambda: "class" + "[class-name='" + str(self.class_name) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Classes.Class, ['class_name', 'ip_tos', 'vrf_name', 'receive_window_size', 'class_name_xr', 'digest_hash', 'password', 'encoded_password', 'host_name', 'accounting_method_list', 'hello_timeout', 'setup_timeout', 'retransmit_minimum_timeout', 'retransmit_maximum_timeout', 'initial_retransmit_minimum_timeout', 'initial_retransmit_maximum_timeout', 'timeout_no_user', 'retransmit_retries', 'initial_retransmit_retries', 'is_authentication_enabled', 'is_hidden', 'is_digest_enabled', 'is_digest_check_enabled', 'is_congestion_control_enabled', 'is_peer_address_checked'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Classes.Class']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Classes']['meta_info']
class Tunnels(_Entity_):
"""
List of tunnel IDs
.. attribute:: tunnel
L2TP tunnel information
**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Tunnels.Tunnel>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Tunnels, self).__init__()
self.yang_name = "tunnels"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("tunnel", ("tunnel", L2tpv2.Nodes.Node.Tunnels.Tunnel))])
self._leafs = OrderedDict()
self.tunnel = YList(self)
self._segment_path = lambda: "tunnels"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Tunnels, [], name, value)
class Tunnel(_Entity_):
"""
L2TP tunnel information
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: local_address
Local tunnel address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: remote_address
Remote tunnel address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: local_port
Local port
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: remote_port
Remote port
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: protocol
Protocol
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: is_pmtu_enabled
True if tunnel PMTU checking is enabled
**type**\: bool
**config**\: False
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: local_tunnel_name
Local tunnel name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: remote_tunnel_name
Remote tunnel name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: class_name
L2TP class name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: active_sessions
Number of active sessions
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: sequence_ns
Sequence NS
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: sequence_nr
Sequence NR
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: local_window_size
Local window size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: remote_window_size
Remote window size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: retransmission_time
Retransmission time in seconds
**type**\: int
**range:** 0..65535
**config**\: False
**units**\: second
.. attribute:: maximum_retransmission_time
Maximum retransmission time in seconds
**type**\: int
**range:** 0..65535
**config**\: False
**units**\: second
.. attribute:: unsent_queue_size
Unsent queue size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: unsent_maximum_queue_size
Unsent maximum queue size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: resend_queue_size
Resend queue size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: resend_maximum_queue_size
Resend maximum queue size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: order_queue_size
Order queue size
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: packet_queue_check
Current number session packet queue check
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: digest_secrets
Control message authentication with digest secrets
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: resends
Total resends
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: zero_length_body_acknowledgement_sent
Total zero length body acknowledgement
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_out_of_order_drop_packets
Total out of order dropped packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_out_of_order_reorder_packets
Total out of order reorder packets
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: total_peer_authentication_failures
Number of peer authentication failures
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: is_tunnel_up
True if tunnel is up
**type**\: bool
**config**\: False
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled else false
**type**\: bool
**config**\: False
.. attribute:: retransmit_time
Retransmit time distribution in seconds
**type**\: list of :py:class:`RetransmitTime <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Tunnels.Tunnel.RetransmitTime>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Tunnels.Tunnel, self).__init__()
self.yang_name = "tunnel"
self.yang_parent_name = "tunnels"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['local_tunnel_id']
self._child_classes = OrderedDict([("retransmit-time", ("retransmit_time", L2tpv2.Nodes.Node.Tunnels.Tunnel.RetransmitTime))])
self._leafs = OrderedDict([
('local_tunnel_id', (YLeaf(YType.uint32, 'local-tunnel-id'), ['int'])),
('local_address', (YLeaf(YType.str, 'local-address'), ['str'])),
('remote_address', (YLeaf(YType.str, 'remote-address'), ['str'])),
('local_port', (YLeaf(YType.uint16, 'local-port'), ['int'])),
('remote_port', (YLeaf(YType.uint16, 'remote-port'), ['int'])),
('protocol', (YLeaf(YType.uint8, 'protocol'), ['int'])),
('is_pmtu_enabled', (YLeaf(YType.boolean, 'is-pmtu-enabled'), ['bool'])),
('remote_tunnel_id', (YLeaf(YType.uint32, 'remote-tunnel-id'), ['int'])),
('local_tunnel_name', (YLeaf(YType.str, 'local-tunnel-name'), ['str'])),
('remote_tunnel_name', (YLeaf(YType.str, 'remote-tunnel-name'), ['str'])),
('class_name', (YLeaf(YType.str, 'class-name'), ['str'])),
('active_sessions', (YLeaf(YType.uint32, 'active-sessions'), ['int'])),
('sequence_ns', (YLeaf(YType.uint16, 'sequence-ns'), ['int'])),
('sequence_nr', (YLeaf(YType.uint16, 'sequence-nr'), ['int'])),
('local_window_size', (YLeaf(YType.uint16, 'local-window-size'), ['int'])),
('remote_window_size', (YLeaf(YType.uint16, 'remote-window-size'), ['int'])),
('retransmission_time', (YLeaf(YType.uint16, 'retransmission-time'), ['int'])),
('maximum_retransmission_time', (YLeaf(YType.uint16, 'maximum-retransmission-time'), ['int'])),
('unsent_queue_size', (YLeaf(YType.uint16, 'unsent-queue-size'), ['int'])),
('unsent_maximum_queue_size', (YLeaf(YType.uint16, 'unsent-maximum-queue-size'), ['int'])),
('resend_queue_size', (YLeaf(YType.uint16, 'resend-queue-size'), ['int'])),
('resend_maximum_queue_size', (YLeaf(YType.uint16, 'resend-maximum-queue-size'), ['int'])),
('order_queue_size', (YLeaf(YType.uint16, 'order-queue-size'), ['int'])),
('packet_queue_check', (YLeaf(YType.uint16, 'packet-queue-check'), ['int'])),
('digest_secrets', (YLeaf(YType.uint16, 'digest-secrets'), ['int'])),
('resends', (YLeaf(YType.uint32, 'resends'), ['int'])),
('zero_length_body_acknowledgement_sent', (YLeaf(YType.uint32, 'zero-length-body-acknowledgement-sent'), ['int'])),
('total_out_of_order_drop_packets', (YLeaf(YType.uint32, 'total-out-of-order-drop-packets'), ['int'])),
('total_out_of_order_reorder_packets', (YLeaf(YType.uint32, 'total-out-of-order-reorder-packets'), ['int'])),
('total_peer_authentication_failures', (YLeaf(YType.uint32, 'total-peer-authentication-failures'), ['int'])),
('is_tunnel_up', (YLeaf(YType.boolean, 'is-tunnel-up'), ['bool'])),
('is_congestion_control_enabled', (YLeaf(YType.boolean, 'is-congestion-control-enabled'), ['bool'])),
])
self.local_tunnel_id = None
self.local_address = None
self.remote_address = None
self.local_port = None
self.remote_port = None
self.protocol = None
self.is_pmtu_enabled = None
self.remote_tunnel_id = None
self.local_tunnel_name = None
self.remote_tunnel_name = None
self.class_name = None
self.active_sessions = None
self.sequence_ns = None
self.sequence_nr = None
self.local_window_size = None
self.remote_window_size = None
self.retransmission_time = None
self.maximum_retransmission_time = None
self.unsent_queue_size = None
self.unsent_maximum_queue_size = None
self.resend_queue_size = None
self.resend_maximum_queue_size = None
self.order_queue_size = None
self.packet_queue_check = None
self.digest_secrets = None
self.resends = None
self.zero_length_body_acknowledgement_sent = None
self.total_out_of_order_drop_packets = None
self.total_out_of_order_reorder_packets = None
self.total_peer_authentication_failures = None
self.is_tunnel_up = None
self.is_congestion_control_enabled = None
self.retransmit_time = YList(self)
self._segment_path = lambda: "tunnel" + "[local-tunnel-id='" + str(self.local_tunnel_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Tunnels.Tunnel, ['local_tunnel_id', 'local_address', 'remote_address', 'local_port', 'remote_port', 'protocol', 'is_pmtu_enabled', 'remote_tunnel_id', 'local_tunnel_name', 'remote_tunnel_name', 'class_name', 'active_sessions', 'sequence_ns', 'sequence_nr', 'local_window_size', 'remote_window_size', 'retransmission_time', 'maximum_retransmission_time', 'unsent_queue_size', 'unsent_maximum_queue_size', 'resend_queue_size', 'resend_maximum_queue_size', 'order_queue_size', 'packet_queue_check', 'digest_secrets', 'resends', 'zero_length_body_acknowledgement_sent', 'total_out_of_order_drop_packets', 'total_out_of_order_reorder_packets', 'total_peer_authentication_failures', 'is_tunnel_up', 'is_congestion_control_enabled'], name, value)
class RetransmitTime(_Entity_):
"""
Retransmit time distribution in seconds
.. attribute:: entry
Retransmit time distribution in seconds
**type**\: int
**range:** 0..65535
**config**\: False
**units**\: second
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Tunnels.Tunnel.RetransmitTime, self).__init__()
self.yang_name = "retransmit-time"
self.yang_parent_name = "tunnel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('entry', (YLeaf(YType.uint16, 'entry'), ['int'])),
])
self.entry = None
self._segment_path = lambda: "retransmit-time"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Tunnels.Tunnel.RetransmitTime, ['entry'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Tunnels.Tunnel.RetransmitTime']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Tunnels.Tunnel']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Tunnels']['meta_info']
class Sessions(_Entity_):
"""
List of session IDs
.. attribute:: session
L2TP information for a particular session
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Sessions.Session>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Sessions, self).__init__()
self.yang_name = "sessions"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("session", ("session", L2tpv2.Nodes.Node.Sessions.Session))])
self._leafs = OrderedDict()
self.session = YList(self)
self._segment_path = lambda: "sessions"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Sessions, [], name, value)
class Session(_Entity_):
"""
L2TP information for a particular session
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: local_session_id (key)
Local session ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: session_application_data
Session application data
**type**\: :py:class:`SessionApplicationData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData>`
**config**\: False
.. attribute:: local_ip_address
Local session IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: remote_ip_address
Remote session IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: l2tp_sh_sess_udp_lport
l2tp sh sess udp lport
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: l2tp_sh_sess_udp_rport
l2tp sh sess udp rport
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: protocol
Protocol
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: call_serial_number
Call serial number
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: local_tunnel_name
Local tunnel name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: remote_tunnel_name
Remote tunnel name
**type**\: str
**length:** 0..256
**config**\: False
.. attribute:: remote_session_id
Remote session ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_sess_tie_breaker_enabled
l2tp sh sess tie breaker enabled
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: l2tp_sh_sess_tie_breaker
l2tp sh sess tie breaker
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: is_session_manual
True if session is manual
**type**\: bool
**config**\: False
.. attribute:: is_session_up
True if session is up
**type**\: bool
**config**\: False
.. attribute:: is_udp_checksum_enabled
True if UDP checksum enabled
**type**\: bool
**config**\: False
.. attribute:: is_sequencing_on
True if session sequence is on
**type**\: bool
**config**\: False
.. attribute:: is_session_state_established
True if session state is established
**type**\: bool
**config**\: False
.. attribute:: is_session_locally_initiated
True if session initiated locally
**type**\: bool
**config**\: False
.. attribute:: is_conditional_debug_enabled
True if conditional debugging is enabled
**type**\: bool
**config**\: False
.. attribute:: unique_id
Unique ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: interface_name
Interface name
**type**\: str
**length:** 0..256
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Sessions.Session, self).__init__()
self.yang_name = "session"
self.yang_parent_name = "sessions"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['local_tunnel_id','local_session_id']
self._child_classes = OrderedDict([("session-application-data", ("session_application_data", L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData))])
self._leafs = OrderedDict([
('local_tunnel_id', (YLeaf(YType.uint32, 'local-tunnel-id'), ['int'])),
('local_session_id', (YLeaf(YType.uint32, 'local-session-id'), ['int'])),
('local_ip_address', (YLeaf(YType.str, 'local-ip-address'), ['str'])),
('remote_ip_address', (YLeaf(YType.str, 'remote-ip-address'), ['str'])),
('l2tp_sh_sess_udp_lport', (YLeaf(YType.uint16, 'l2tp-sh-sess-udp-lport'), ['int'])),
('l2tp_sh_sess_udp_rport', (YLeaf(YType.uint16, 'l2tp-sh-sess-udp-rport'), ['int'])),
('protocol', (YLeaf(YType.uint8, 'protocol'), ['int'])),
('remote_tunnel_id', (YLeaf(YType.uint32, 'remote-tunnel-id'), ['int'])),
('call_serial_number', (YLeaf(YType.uint32, 'call-serial-number'), ['int'])),
('local_tunnel_name', (YLeaf(YType.str, 'local-tunnel-name'), ['str'])),
('remote_tunnel_name', (YLeaf(YType.str, 'remote-tunnel-name'), ['str'])),
('remote_session_id', (YLeaf(YType.uint32, 'remote-session-id'), ['int'])),
('l2tp_sh_sess_tie_breaker_enabled', (YLeaf(YType.uint8, 'l2tp-sh-sess-tie-breaker-enabled'), ['int'])),
('l2tp_sh_sess_tie_breaker', (YLeaf(YType.uint64, 'l2tp-sh-sess-tie-breaker'), ['int'])),
('is_session_manual', (YLeaf(YType.boolean, 'is-session-manual'), ['bool'])),
('is_session_up', (YLeaf(YType.boolean, 'is-session-up'), ['bool'])),
('is_udp_checksum_enabled', (YLeaf(YType.boolean, 'is-udp-checksum-enabled'), ['bool'])),
('is_sequencing_on', (YLeaf(YType.boolean, 'is-sequencing-on'), ['bool'])),
('is_session_state_established', (YLeaf(YType.boolean, 'is-session-state-established'), ['bool'])),
('is_session_locally_initiated', (YLeaf(YType.boolean, 'is-session-locally-initiated'), ['bool'])),
('is_conditional_debug_enabled', (YLeaf(YType.boolean, 'is-conditional-debug-enabled'), ['bool'])),
('unique_id', (YLeaf(YType.uint32, 'unique-id'), ['int'])),
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.local_tunnel_id = None
self.local_session_id = None
self.local_ip_address = None
self.remote_ip_address = None
self.l2tp_sh_sess_udp_lport = None
self.l2tp_sh_sess_udp_rport = None
self.protocol = None
self.remote_tunnel_id = None
self.call_serial_number = None
self.local_tunnel_name = None
self.remote_tunnel_name = None
self.remote_session_id = None
self.l2tp_sh_sess_tie_breaker_enabled = None
self.l2tp_sh_sess_tie_breaker = None
self.is_session_manual = None
self.is_session_up = None
self.is_udp_checksum_enabled = None
self.is_sequencing_on = None
self.is_session_state_established = None
self.is_session_locally_initiated = None
self.is_conditional_debug_enabled = None
self.unique_id = None
self.interface_name = None
self.session_application_data = L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData()
self.session_application_data.parent = self
self._children_name_map["session_application_data"] = "session-application-data"
self._segment_path = lambda: "session" + "[local-tunnel-id='" + str(self.local_tunnel_id) + "']" + "[local-session-id='" + str(self.local_session_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Sessions.Session, ['local_tunnel_id', 'local_session_id', 'local_ip_address', 'remote_ip_address', 'l2tp_sh_sess_udp_lport', 'l2tp_sh_sess_udp_rport', 'protocol', 'remote_tunnel_id', 'call_serial_number', 'local_tunnel_name', 'remote_tunnel_name', 'remote_session_id', 'l2tp_sh_sess_tie_breaker_enabled', 'l2tp_sh_sess_tie_breaker', 'is_session_manual', 'is_session_up', 'is_udp_checksum_enabled', 'is_sequencing_on', 'is_session_state_established', 'is_session_locally_initiated', 'is_conditional_debug_enabled', 'unique_id', 'interface_name'], name, value)
class SessionApplicationData(_Entity_):
"""
Session application data
.. attribute:: xconnect
Xconnect data
**type**\: :py:class:`Xconnect <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect>`
**config**\: False
.. attribute:: vpdn
VPDN data
**type**\: :py:class:`Vpdn <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn>`
**config**\: False
.. attribute:: l2tp_sh_sess_app_type
l2tp sh sess app type
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData, self).__init__()
self.yang_name = "session-application-data"
self.yang_parent_name = "session"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("xconnect", ("xconnect", L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect)), ("vpdn", ("vpdn", L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn))])
self._leafs = OrderedDict([
('l2tp_sh_sess_app_type', (YLeaf(YType.uint32, 'l2tp-sh-sess-app-type'), ['int'])),
])
self.l2tp_sh_sess_app_type = None
self.xconnect = L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect()
self.xconnect.parent = self
self._children_name_map["xconnect"] = "xconnect"
self.vpdn = L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn()
self.vpdn.parent = self
self._children_name_map["vpdn"] = "vpdn"
self._segment_path = lambda: "session-application-data"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData, ['l2tp_sh_sess_app_type'], name, value)
class Xconnect(_Entity_):
"""
Xconnect data
.. attribute:: circuit_name
Circuit name
**type**\: str
**config**\: False
.. attribute:: sessionvc_id
Session VC ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: is_circuit_state_up
True if circuit state is up
**type**\: bool
**config**\: False
.. attribute:: is_local_circuit_state_up
True if local circuit state is up
**type**\: bool
**config**\: False
.. attribute:: is_remote_circuit_state_up
True if remote circuit state is up
**type**\: bool
**config**\: False
.. attribute:: ipv6_protocol_tunneling
IPv6ProtocolTunneling
**type**\: bool
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect, self).__init__()
self.yang_name = "xconnect"
self.yang_parent_name = "session-application-data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('circuit_name', (YLeaf(YType.str, 'circuit-name'), ['str'])),
('sessionvc_id', (YLeaf(YType.uint32, 'sessionvc-id'), ['int'])),
('is_circuit_state_up', (YLeaf(YType.boolean, 'is-circuit-state-up'), ['bool'])),
('is_local_circuit_state_up', (YLeaf(YType.boolean, 'is-local-circuit-state-up'), ['bool'])),
('is_remote_circuit_state_up', (YLeaf(YType.boolean, 'is-remote-circuit-state-up'), ['bool'])),
('ipv6_protocol_tunneling', (YLeaf(YType.boolean, 'ipv6-protocol-tunneling'), ['bool'])),
])
self.circuit_name = None
self.sessionvc_id = None
self.is_circuit_state_up = None
self.is_local_circuit_state_up = None
self.is_remote_circuit_state_up = None
self.ipv6_protocol_tunneling = None
self._segment_path = lambda: "xconnect"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect, ['circuit_name', 'sessionvc_id', 'is_circuit_state_up', 'is_local_circuit_state_up', 'is_remote_circuit_state_up', 'ipv6_protocol_tunneling'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Xconnect']['meta_info']
class Vpdn(_Entity_):
"""
VPDN data
.. attribute:: username
Session username
**type**\: str
**config**\: False
.. attribute:: interface_name
Interface name
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn, self).__init__()
self.yang_name = "vpdn"
self.yang_parent_name = "session-application-data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('username', (YLeaf(YType.str, 'username'), ['str'])),
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.username = None
self.interface_name = None
self._segment_path = lambda: "vpdn"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn, ['username', 'interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData.Vpdn']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Sessions.Session.SessionApplicationData']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Sessions.Session']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Sessions']['meta_info']
class Session(_Entity_):
"""
L2TP control messages counters
.. attribute:: unavailable
L2TP session unavailable information
**type**\: :py:class:`Unavailable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Session.Unavailable>`
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Session, self).__init__()
self.yang_name = "session"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("unavailable", ("unavailable", L2tpv2.Nodes.Node.Session.Unavailable))])
self._leafs = OrderedDict()
self.unavailable = L2tpv2.Nodes.Node.Session.Unavailable()
self.unavailable.parent = self
self._children_name_map["unavailable"] = "unavailable"
self._segment_path = lambda: "session"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Session, [], name, value)
class Unavailable(_Entity_):
"""
L2TP session unavailable information
.. attribute:: sessions_on_hold
Number of session ID in hold database
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Session.Unavailable, self).__init__()
self.yang_name = "unavailable"
self.yang_parent_name = "session"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('sessions_on_hold', (YLeaf(YType.uint32, 'sessions-on-hold'), ['int'])),
])
self.sessions_on_hold = None
self._segment_path = lambda: "unavailable"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Session.Unavailable, ['sessions_on_hold'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Session.Unavailable']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Session']['meta_info']
class Internal(_Entity_):
"""
L2TP v2/v3 internal information
.. attribute:: internal_stats
internal stats
**type**\: :py:class:`InternalStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Internal.InternalStats>`
**config**\: False
.. attribute:: internal_stats_last_clear
internal stats last clear
**type**\: :py:class:`InternalStatsLastClear <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2tpv2.Nodes.Node.Internal.InternalStatsLastClear>`
**config**\: False
.. attribute:: time_last_clear
time last clear
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Internal, self).__init__()
self.yang_name = "internal"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("internal-stats", ("internal_stats", L2tpv2.Nodes.Node.Internal.InternalStats)), ("internal-stats-last-clear", ("internal_stats_last_clear", L2tpv2.Nodes.Node.Internal.InternalStatsLastClear))])
self._leafs = OrderedDict([
('time_last_clear', (YLeaf(YType.uint32, 'time-last-clear'), ['int'])),
])
self.time_last_clear = None
self.internal_stats = L2tpv2.Nodes.Node.Internal.InternalStats()
self.internal_stats.parent = self
self._children_name_map["internal_stats"] = "internal-stats"
self.internal_stats_last_clear = L2tpv2.Nodes.Node.Internal.InternalStatsLastClear()
self.internal_stats_last_clear.parent = self
self._children_name_map["internal_stats_last_clear"] = "internal-stats-last-clear"
self._segment_path = lambda: "internal"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Internal, ['time_last_clear'], name, value)
class InternalStats(_Entity_):
"""
internal stats
.. attribute:: l2tp_sh_l2x_num_tunnels
l2tp sh l2x num tunnels
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_sessions
l2tp sh l2x num sessions
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_rx_high_water_mark
l2tp sh l2x rx high water mark
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_ave_msg_process_usecs
l2tp sh l2x ave msg process usecs
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_msgs
l2tp sh l2x num rx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_msgs
l2tp sh l2x num tx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_err_drops
l2tp sh l2x num tx err drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_conn_drops
l2tp sh l2x num tx conn drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_reordered_msgs
l2tp sh l2x num reordered msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_max_reorder_deviation
l2tp sh l2x max reorder deviation
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_ooo_msgs
l2tp sh l2x num ooo msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_path_drops
l2tp sh l2x num rx path drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_path_data_pkt_drops
l2tp sh l2x num rx path data pkt drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_queue_drops
l2tp sh l2x num rx queue drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_ooo_drops
l2tp sh l2x num rx ooo drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_buffered_msgs
l2tp sh l2x num buffered msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_mutex_block
l2tp sh l2x num mutex block
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_bad_len_drops
l2tp sh l2x num bad len drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_bad_avp_drops
l2tp sh l2x num bad avp drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_missing_cc_id_drops
l2tp sh l2x num missing cc id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_missing_sess_id_drops
l2tp sh l2x num missing sess id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_mismatch_cc_id_drops
l2tp sh l2x num mismatch cc id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_unknown_cc_drops
l2tp sh l2x num unknown cc drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_unknown_sess_drops
l2tp sh l2x num unknown sess drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_linear_id_search
l2tp sh l2x num linear id search
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_linear_id_search_fail
l2tp sh l2x num linear id search fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_netio_pkt_rx
l2tp sh l2x num netio pkt rx
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2tun_ave_msg_process_usecs
l2tp sh l2tun ave msg process usecs
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_sh_l2tun_num_rx_msgs
l2tp sh l2tun num rx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2tun_num_tx_msgs
l2tp sh l2tun num tx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_l2tun_socket_ens_send_error_cnt
l2tp l2tun socket ens send error cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_accept
l2tp l2tun socket session accept
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_destroy
l2tp l2tun socket session destroy
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_connect
l2tp l2tun socket session connect
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_connect_continue
l2tp l2tun socket session connect continue
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_connecting
l2tp l2tun session connecting
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_connected
l2tp l2tun session connected
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_disconnected
l2tp l2tun session disconnected
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_incoming
l2tp l2tun session incoming
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_updated
l2tp l2tun session updated
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_circuit_status
l2tp l2tun session circuit status
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2x_lpts_pa_stats_setup_cnt
l2x lpts pa stats setup cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_destroy_cnt
l2x lpts pa stats destroy cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_alloc_cnt
l2x lpts pa stats alloc cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_alloc_fail_cnt
l2x lpts pa stats alloc fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_init_cnt
l2x lpts pa stats init cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_init_fail_cnt
l2x lpts pa stats init fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_free_cnt
l2x lpts pa stats free cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_pulse_cnt
l2x lpts pa stats pulse cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_pulse_fail_cnt
l2x lpts pa stats pulse fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_cnt
l2x lpts pa stats bind cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_fail_cnt
l2x lpts pa stats bind fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_batch_cnt
l2x lpts pa stats bind batch cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_batch_fail_cnt
l2x lpts pa stats bind batch fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_time
l2x lpts pa stats bind time
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_expire_cnt
l2x lpts pa stats expire cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_cnt
l2x lpts pa stats replay cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_batch_cnt
l2x lpts pa stats replay batch cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_time
l2x lpts pa stats replay time
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Internal.InternalStats, self).__init__()
self.yang_name = "internal-stats"
self.yang_parent_name = "internal"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('l2tp_sh_l2x_num_tunnels', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tunnels'), ['int'])),
('l2tp_sh_l2x_num_sessions', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-sessions'), ['int'])),
('l2tp_sh_l2x_rx_high_water_mark', (YLeaf(YType.uint32, 'l2tp-sh-l2x-rx-high-water-mark'), ['int'])),
('l2tp_sh_l2x_ave_msg_process_usecs', (YLeaf(YType.uint64, 'l2tp-sh-l2x-ave-msg-process-usecs'), ['int'])),
('l2tp_sh_l2x_num_rx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-msgs'), ['int'])),
('l2tp_sh_l2x_num_tx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-msgs'), ['int'])),
('l2tp_sh_l2x_num_tx_err_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-err-drops'), ['int'])),
('l2tp_sh_l2x_num_tx_conn_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-conn-drops'), ['int'])),
('l2tp_sh_l2x_num_reordered_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-reordered-msgs'), ['int'])),
('l2tp_sh_l2x_max_reorder_deviation', (YLeaf(YType.uint32, 'l2tp-sh-l2x-max-reorder-deviation'), ['int'])),
('l2tp_sh_l2x_num_ooo_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-ooo-msgs'), ['int'])),
('l2tp_sh_l2x_num_rx_path_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-path-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_path_data_pkt_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-path-data-pkt-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_queue_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-queue-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_ooo_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-ooo-drops'), ['int'])),
('l2tp_sh_l2x_num_buffered_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-buffered-msgs'), ['int'])),
('l2tp_sh_l2x_num_mutex_block', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-mutex-block'), ['int'])),
('l2tp_sh_l2x_num_bad_len_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-bad-len-drops'), ['int'])),
('l2tp_sh_l2x_num_bad_avp_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-bad-avp-drops'), ['int'])),
('l2tp_sh_l2x_num_missing_cc_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-missing-cc-id-drops'), ['int'])),
('l2tp_sh_l2x_num_missing_sess_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-missing-sess-id-drops'), ['int'])),
('l2tp_sh_l2x_num_mismatch_cc_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-mismatch-cc-id-drops'), ['int'])),
('l2tp_sh_l2x_num_unknown_cc_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-unknown-cc-drops'), ['int'])),
('l2tp_sh_l2x_num_unknown_sess_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-unknown-sess-drops'), ['int'])),
('l2tp_sh_l2x_num_linear_id_search', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-linear-id-search'), ['int'])),
('l2tp_sh_l2x_num_linear_id_search_fail', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-linear-id-search-fail'), ['int'])),
('l2tp_sh_l2x_num_netio_pkt_rx', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-netio-pkt-rx'), ['int'])),
('l2tp_sh_l2tun_ave_msg_process_usecs', (YLeaf(YType.uint64, 'l2tp-sh-l2tun-ave-msg-process-usecs'), ['int'])),
('l2tp_sh_l2tun_num_rx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2tun-num-rx-msgs'), ['int'])),
('l2tp_sh_l2tun_num_tx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2tun-num-tx-msgs'), ['int'])),
('l2tp_l2tun_socket_ens_send_error_cnt', (YLeaf(YType.uint32, 'l2tp-l2tun-socket-ens-send-error-cnt'), ['int'])),
('l2tp_l2tun_socket_session_accept', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-accept'), ['int'])),
('l2tp_l2tun_socket_session_destroy', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-destroy'), ['int'])),
('l2tp_l2tun_socket_session_connect', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-connect'), ['int'])),
('l2tp_l2tun_socket_session_connect_continue', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-connect-continue'), ['int'])),
('l2tp_l2tun_session_connecting', (YLeaf(YType.uint64, 'l2tp-l2tun-session-connecting'), ['int'])),
('l2tp_l2tun_session_connected', (YLeaf(YType.uint64, 'l2tp-l2tun-session-connected'), ['int'])),
('l2tp_l2tun_session_disconnected', (YLeaf(YType.uint64, 'l2tp-l2tun-session-disconnected'), ['int'])),
('l2tp_l2tun_session_incoming', (YLeaf(YType.uint64, 'l2tp-l2tun-session-incoming'), ['int'])),
('l2tp_l2tun_session_updated', (YLeaf(YType.uint64, 'l2tp-l2tun-session-updated'), ['int'])),
('l2tp_l2tun_session_circuit_status', (YLeaf(YType.uint64, 'l2tp-l2tun-session-circuit-status'), ['int'])),
('l2x_lpts_pa_stats_setup_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-setup-cnt'), ['int'])),
('l2x_lpts_pa_stats_destroy_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-destroy-cnt'), ['int'])),
('l2x_lpts_pa_stats_alloc_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-alloc-cnt'), ['int'])),
('l2x_lpts_pa_stats_alloc_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-alloc-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_init_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-init-cnt'), ['int'])),
('l2x_lpts_pa_stats_init_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-init-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_free_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-free-cnt'), ['int'])),
('l2x_lpts_pa_stats_pulse_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-pulse-cnt'), ['int'])),
('l2x_lpts_pa_stats_pulse_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-pulse-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_batch_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-batch-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_batch_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-batch-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_time', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-time'), ['int'])),
('l2x_lpts_pa_stats_expire_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-expire-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_batch_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-batch-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_time', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-time'), ['int'])),
])
self.l2tp_sh_l2x_num_tunnels = None
self.l2tp_sh_l2x_num_sessions = None
self.l2tp_sh_l2x_rx_high_water_mark = None
self.l2tp_sh_l2x_ave_msg_process_usecs = None
self.l2tp_sh_l2x_num_rx_msgs = None
self.l2tp_sh_l2x_num_tx_msgs = None
self.l2tp_sh_l2x_num_tx_err_drops = None
self.l2tp_sh_l2x_num_tx_conn_drops = None
self.l2tp_sh_l2x_num_reordered_msgs = None
self.l2tp_sh_l2x_max_reorder_deviation = None
self.l2tp_sh_l2x_num_ooo_msgs = None
self.l2tp_sh_l2x_num_rx_path_drops = None
self.l2tp_sh_l2x_num_rx_path_data_pkt_drops = None
self.l2tp_sh_l2x_num_rx_queue_drops = None
self.l2tp_sh_l2x_num_rx_ooo_drops = None
self.l2tp_sh_l2x_num_buffered_msgs = None
self.l2tp_sh_l2x_num_mutex_block = None
self.l2tp_sh_l2x_num_bad_len_drops = None
self.l2tp_sh_l2x_num_bad_avp_drops = None
self.l2tp_sh_l2x_num_missing_cc_id_drops = None
self.l2tp_sh_l2x_num_missing_sess_id_drops = None
self.l2tp_sh_l2x_num_mismatch_cc_id_drops = None
self.l2tp_sh_l2x_num_unknown_cc_drops = None
self.l2tp_sh_l2x_num_unknown_sess_drops = None
self.l2tp_sh_l2x_num_linear_id_search = None
self.l2tp_sh_l2x_num_linear_id_search_fail = None
self.l2tp_sh_l2x_num_netio_pkt_rx = None
self.l2tp_sh_l2tun_ave_msg_process_usecs = None
self.l2tp_sh_l2tun_num_rx_msgs = None
self.l2tp_sh_l2tun_num_tx_msgs = None
self.l2tp_l2tun_socket_ens_send_error_cnt = None
self.l2tp_l2tun_socket_session_accept = None
self.l2tp_l2tun_socket_session_destroy = None
self.l2tp_l2tun_socket_session_connect = None
self.l2tp_l2tun_socket_session_connect_continue = None
self.l2tp_l2tun_session_connecting = None
self.l2tp_l2tun_session_connected = None
self.l2tp_l2tun_session_disconnected = None
self.l2tp_l2tun_session_incoming = None
self.l2tp_l2tun_session_updated = None
self.l2tp_l2tun_session_circuit_status = None
self.l2x_lpts_pa_stats_setup_cnt = None
self.l2x_lpts_pa_stats_destroy_cnt = None
self.l2x_lpts_pa_stats_alloc_cnt = None
self.l2x_lpts_pa_stats_alloc_fail_cnt = None
self.l2x_lpts_pa_stats_init_cnt = None
self.l2x_lpts_pa_stats_init_fail_cnt = None
self.l2x_lpts_pa_stats_free_cnt = None
self.l2x_lpts_pa_stats_pulse_cnt = None
self.l2x_lpts_pa_stats_pulse_fail_cnt = None
self.l2x_lpts_pa_stats_bind_cnt = None
self.l2x_lpts_pa_stats_bind_fail_cnt = None
self.l2x_lpts_pa_stats_bind_batch_cnt = None
self.l2x_lpts_pa_stats_bind_batch_fail_cnt = None
self.l2x_lpts_pa_stats_bind_time = None
self.l2x_lpts_pa_stats_expire_cnt = None
self.l2x_lpts_pa_stats_replay_cnt = None
self.l2x_lpts_pa_stats_replay_batch_cnt = None
self.l2x_lpts_pa_stats_replay_time = None
self._segment_path = lambda: "internal-stats"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Internal.InternalStats, ['l2tp_sh_l2x_num_tunnels', 'l2tp_sh_l2x_num_sessions', 'l2tp_sh_l2x_rx_high_water_mark', 'l2tp_sh_l2x_ave_msg_process_usecs', 'l2tp_sh_l2x_num_rx_msgs', 'l2tp_sh_l2x_num_tx_msgs', 'l2tp_sh_l2x_num_tx_err_drops', 'l2tp_sh_l2x_num_tx_conn_drops', 'l2tp_sh_l2x_num_reordered_msgs', 'l2tp_sh_l2x_max_reorder_deviation', 'l2tp_sh_l2x_num_ooo_msgs', 'l2tp_sh_l2x_num_rx_path_drops', 'l2tp_sh_l2x_num_rx_path_data_pkt_drops', 'l2tp_sh_l2x_num_rx_queue_drops', 'l2tp_sh_l2x_num_rx_ooo_drops', 'l2tp_sh_l2x_num_buffered_msgs', 'l2tp_sh_l2x_num_mutex_block', 'l2tp_sh_l2x_num_bad_len_drops', 'l2tp_sh_l2x_num_bad_avp_drops', 'l2tp_sh_l2x_num_missing_cc_id_drops', 'l2tp_sh_l2x_num_missing_sess_id_drops', 'l2tp_sh_l2x_num_mismatch_cc_id_drops', 'l2tp_sh_l2x_num_unknown_cc_drops', 'l2tp_sh_l2x_num_unknown_sess_drops', 'l2tp_sh_l2x_num_linear_id_search', 'l2tp_sh_l2x_num_linear_id_search_fail', 'l2tp_sh_l2x_num_netio_pkt_rx', 'l2tp_sh_l2tun_ave_msg_process_usecs', 'l2tp_sh_l2tun_num_rx_msgs', 'l2tp_sh_l2tun_num_tx_msgs', 'l2tp_l2tun_socket_ens_send_error_cnt', 'l2tp_l2tun_socket_session_accept', 'l2tp_l2tun_socket_session_destroy', 'l2tp_l2tun_socket_session_connect', 'l2tp_l2tun_socket_session_connect_continue', 'l2tp_l2tun_session_connecting', 'l2tp_l2tun_session_connected', 'l2tp_l2tun_session_disconnected', 'l2tp_l2tun_session_incoming', 'l2tp_l2tun_session_updated', 'l2tp_l2tun_session_circuit_status', 'l2x_lpts_pa_stats_setup_cnt', 'l2x_lpts_pa_stats_destroy_cnt', 'l2x_lpts_pa_stats_alloc_cnt', 'l2x_lpts_pa_stats_alloc_fail_cnt', 'l2x_lpts_pa_stats_init_cnt', 'l2x_lpts_pa_stats_init_fail_cnt', 'l2x_lpts_pa_stats_free_cnt', 'l2x_lpts_pa_stats_pulse_cnt', 'l2x_lpts_pa_stats_pulse_fail_cnt', 'l2x_lpts_pa_stats_bind_cnt', 'l2x_lpts_pa_stats_bind_fail_cnt', 'l2x_lpts_pa_stats_bind_batch_cnt', 'l2x_lpts_pa_stats_bind_batch_fail_cnt', 'l2x_lpts_pa_stats_bind_time', 'l2x_lpts_pa_stats_expire_cnt', 'l2x_lpts_pa_stats_replay_cnt', 'l2x_lpts_pa_stats_replay_batch_cnt', 'l2x_lpts_pa_stats_replay_time'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Internal.InternalStats']['meta_info']
class InternalStatsLastClear(_Entity_):
"""
internal stats last clear
.. attribute:: l2tp_sh_l2x_num_tunnels
l2tp sh l2x num tunnels
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_sessions
l2tp sh l2x num sessions
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_rx_high_water_mark
l2tp sh l2x rx high water mark
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_ave_msg_process_usecs
l2tp sh l2x ave msg process usecs
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_msgs
l2tp sh l2x num rx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_msgs
l2tp sh l2x num tx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_err_drops
l2tp sh l2x num tx err drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_tx_conn_drops
l2tp sh l2x num tx conn drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_reordered_msgs
l2tp sh l2x num reordered msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_max_reorder_deviation
l2tp sh l2x max reorder deviation
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_ooo_msgs
l2tp sh l2x num ooo msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_path_drops
l2tp sh l2x num rx path drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_path_data_pkt_drops
l2tp sh l2x num rx path data pkt drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_queue_drops
l2tp sh l2x num rx queue drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_rx_ooo_drops
l2tp sh l2x num rx ooo drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_buffered_msgs
l2tp sh l2x num buffered msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_mutex_block
l2tp sh l2x num mutex block
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_bad_len_drops
l2tp sh l2x num bad len drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_bad_avp_drops
l2tp sh l2x num bad avp drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_missing_cc_id_drops
l2tp sh l2x num missing cc id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_missing_sess_id_drops
l2tp sh l2x num missing sess id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_mismatch_cc_id_drops
l2tp sh l2x num mismatch cc id drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_unknown_cc_drops
l2tp sh l2x num unknown cc drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_unknown_sess_drops
l2tp sh l2x num unknown sess drops
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_linear_id_search
l2tp sh l2x num linear id search
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_linear_id_search_fail
l2tp sh l2x num linear id search fail
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2x_num_netio_pkt_rx
l2tp sh l2x num netio pkt rx
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2tun_ave_msg_process_usecs
l2tp sh l2tun ave msg process usecs
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_sh_l2tun_num_rx_msgs
l2tp sh l2tun num rx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_sh_l2tun_num_tx_msgs
l2tp sh l2tun num tx msgs
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_l2tun_socket_ens_send_error_cnt
l2tp l2tun socket ens send error cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_accept
l2tp l2tun socket session accept
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_destroy
l2tp l2tun socket session destroy
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_connect
l2tp l2tun socket session connect
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_socket_session_connect_continue
l2tp l2tun socket session connect continue
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_connecting
l2tp l2tun session connecting
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_connected
l2tp l2tun session connected
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_disconnected
l2tp l2tun session disconnected
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_incoming
l2tp l2tun session incoming
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_updated
l2tp l2tun session updated
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2tp_l2tun_session_circuit_status
l2tp l2tun session circuit status
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: l2x_lpts_pa_stats_setup_cnt
l2x lpts pa stats setup cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_destroy_cnt
l2x lpts pa stats destroy cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_alloc_cnt
l2x lpts pa stats alloc cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_alloc_fail_cnt
l2x lpts pa stats alloc fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_init_cnt
l2x lpts pa stats init cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_init_fail_cnt
l2x lpts pa stats init fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_free_cnt
l2x lpts pa stats free cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_pulse_cnt
l2x lpts pa stats pulse cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_pulse_fail_cnt
l2x lpts pa stats pulse fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_cnt
l2x lpts pa stats bind cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_fail_cnt
l2x lpts pa stats bind fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_batch_cnt
l2x lpts pa stats bind batch cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_batch_fail_cnt
l2x lpts pa stats bind batch fail cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_bind_time
l2x lpts pa stats bind time
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_expire_cnt
l2x lpts pa stats expire cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_cnt
l2x lpts pa stats replay cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_batch_cnt
l2x lpts pa stats replay batch cnt
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: l2x_lpts_pa_stats_replay_time
l2x lpts pa stats replay time
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2018-11-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(L2tpv2.Nodes.Node.Internal.InternalStatsLastClear, self).__init__()
self.yang_name = "internal-stats-last-clear"
self.yang_parent_name = "internal"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('l2tp_sh_l2x_num_tunnels', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tunnels'), ['int'])),
('l2tp_sh_l2x_num_sessions', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-sessions'), ['int'])),
('l2tp_sh_l2x_rx_high_water_mark', (YLeaf(YType.uint32, 'l2tp-sh-l2x-rx-high-water-mark'), ['int'])),
('l2tp_sh_l2x_ave_msg_process_usecs', (YLeaf(YType.uint64, 'l2tp-sh-l2x-ave-msg-process-usecs'), ['int'])),
('l2tp_sh_l2x_num_rx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-msgs'), ['int'])),
('l2tp_sh_l2x_num_tx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-msgs'), ['int'])),
('l2tp_sh_l2x_num_tx_err_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-err-drops'), ['int'])),
('l2tp_sh_l2x_num_tx_conn_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-tx-conn-drops'), ['int'])),
('l2tp_sh_l2x_num_reordered_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-reordered-msgs'), ['int'])),
('l2tp_sh_l2x_max_reorder_deviation', (YLeaf(YType.uint32, 'l2tp-sh-l2x-max-reorder-deviation'), ['int'])),
('l2tp_sh_l2x_num_ooo_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-ooo-msgs'), ['int'])),
('l2tp_sh_l2x_num_rx_path_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-path-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_path_data_pkt_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-path-data-pkt-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_queue_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-queue-drops'), ['int'])),
('l2tp_sh_l2x_num_rx_ooo_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-rx-ooo-drops'), ['int'])),
('l2tp_sh_l2x_num_buffered_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-buffered-msgs'), ['int'])),
('l2tp_sh_l2x_num_mutex_block', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-mutex-block'), ['int'])),
('l2tp_sh_l2x_num_bad_len_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-bad-len-drops'), ['int'])),
('l2tp_sh_l2x_num_bad_avp_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-bad-avp-drops'), ['int'])),
('l2tp_sh_l2x_num_missing_cc_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-missing-cc-id-drops'), ['int'])),
('l2tp_sh_l2x_num_missing_sess_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-missing-sess-id-drops'), ['int'])),
('l2tp_sh_l2x_num_mismatch_cc_id_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-mismatch-cc-id-drops'), ['int'])),
('l2tp_sh_l2x_num_unknown_cc_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-unknown-cc-drops'), ['int'])),
('l2tp_sh_l2x_num_unknown_sess_drops', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-unknown-sess-drops'), ['int'])),
('l2tp_sh_l2x_num_linear_id_search', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-linear-id-search'), ['int'])),
('l2tp_sh_l2x_num_linear_id_search_fail', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-linear-id-search-fail'), ['int'])),
('l2tp_sh_l2x_num_netio_pkt_rx', (YLeaf(YType.uint32, 'l2tp-sh-l2x-num-netio-pkt-rx'), ['int'])),
('l2tp_sh_l2tun_ave_msg_process_usecs', (YLeaf(YType.uint64, 'l2tp-sh-l2tun-ave-msg-process-usecs'), ['int'])),
('l2tp_sh_l2tun_num_rx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2tun-num-rx-msgs'), ['int'])),
('l2tp_sh_l2tun_num_tx_msgs', (YLeaf(YType.uint32, 'l2tp-sh-l2tun-num-tx-msgs'), ['int'])),
('l2tp_l2tun_socket_ens_send_error_cnt', (YLeaf(YType.uint32, 'l2tp-l2tun-socket-ens-send-error-cnt'), ['int'])),
('l2tp_l2tun_socket_session_accept', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-accept'), ['int'])),
('l2tp_l2tun_socket_session_destroy', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-destroy'), ['int'])),
('l2tp_l2tun_socket_session_connect', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-connect'), ['int'])),
('l2tp_l2tun_socket_session_connect_continue', (YLeaf(YType.uint64, 'l2tp-l2tun-socket-session-connect-continue'), ['int'])),
('l2tp_l2tun_session_connecting', (YLeaf(YType.uint64, 'l2tp-l2tun-session-connecting'), ['int'])),
('l2tp_l2tun_session_connected', (YLeaf(YType.uint64, 'l2tp-l2tun-session-connected'), ['int'])),
('l2tp_l2tun_session_disconnected', (YLeaf(YType.uint64, 'l2tp-l2tun-session-disconnected'), ['int'])),
('l2tp_l2tun_session_incoming', (YLeaf(YType.uint64, 'l2tp-l2tun-session-incoming'), ['int'])),
('l2tp_l2tun_session_updated', (YLeaf(YType.uint64, 'l2tp-l2tun-session-updated'), ['int'])),
('l2tp_l2tun_session_circuit_status', (YLeaf(YType.uint64, 'l2tp-l2tun-session-circuit-status'), ['int'])),
('l2x_lpts_pa_stats_setup_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-setup-cnt'), ['int'])),
('l2x_lpts_pa_stats_destroy_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-destroy-cnt'), ['int'])),
('l2x_lpts_pa_stats_alloc_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-alloc-cnt'), ['int'])),
('l2x_lpts_pa_stats_alloc_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-alloc-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_init_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-init-cnt'), ['int'])),
('l2x_lpts_pa_stats_init_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-init-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_free_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-free-cnt'), ['int'])),
('l2x_lpts_pa_stats_pulse_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-pulse-cnt'), ['int'])),
('l2x_lpts_pa_stats_pulse_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-pulse-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_batch_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-batch-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_batch_fail_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-batch-fail-cnt'), ['int'])),
('l2x_lpts_pa_stats_bind_time', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-bind-time'), ['int'])),
('l2x_lpts_pa_stats_expire_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-expire-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_batch_cnt', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-batch-cnt'), ['int'])),
('l2x_lpts_pa_stats_replay_time', (YLeaf(YType.uint32, 'l2x-lpts-pa-stats-replay-time'), ['int'])),
])
self.l2tp_sh_l2x_num_tunnels = None
self.l2tp_sh_l2x_num_sessions = None
self.l2tp_sh_l2x_rx_high_water_mark = None
self.l2tp_sh_l2x_ave_msg_process_usecs = None
self.l2tp_sh_l2x_num_rx_msgs = None
self.l2tp_sh_l2x_num_tx_msgs = None
self.l2tp_sh_l2x_num_tx_err_drops = None
self.l2tp_sh_l2x_num_tx_conn_drops = None
self.l2tp_sh_l2x_num_reordered_msgs = None
self.l2tp_sh_l2x_max_reorder_deviation = None
self.l2tp_sh_l2x_num_ooo_msgs = None
self.l2tp_sh_l2x_num_rx_path_drops = None
self.l2tp_sh_l2x_num_rx_path_data_pkt_drops = None
self.l2tp_sh_l2x_num_rx_queue_drops = None
self.l2tp_sh_l2x_num_rx_ooo_drops = None
self.l2tp_sh_l2x_num_buffered_msgs = None
self.l2tp_sh_l2x_num_mutex_block = None
self.l2tp_sh_l2x_num_bad_len_drops = None
self.l2tp_sh_l2x_num_bad_avp_drops = None
self.l2tp_sh_l2x_num_missing_cc_id_drops = None
self.l2tp_sh_l2x_num_missing_sess_id_drops = None
self.l2tp_sh_l2x_num_mismatch_cc_id_drops = None
self.l2tp_sh_l2x_num_unknown_cc_drops = None
self.l2tp_sh_l2x_num_unknown_sess_drops = None
self.l2tp_sh_l2x_num_linear_id_search = None
self.l2tp_sh_l2x_num_linear_id_search_fail = None
self.l2tp_sh_l2x_num_netio_pkt_rx = None
self.l2tp_sh_l2tun_ave_msg_process_usecs = None
self.l2tp_sh_l2tun_num_rx_msgs = None
self.l2tp_sh_l2tun_num_tx_msgs = None
self.l2tp_l2tun_socket_ens_send_error_cnt = None
self.l2tp_l2tun_socket_session_accept = None
self.l2tp_l2tun_socket_session_destroy = None
self.l2tp_l2tun_socket_session_connect = None
self.l2tp_l2tun_socket_session_connect_continue = None
self.l2tp_l2tun_session_connecting = None
self.l2tp_l2tun_session_connected = None
self.l2tp_l2tun_session_disconnected = None
self.l2tp_l2tun_session_incoming = None
self.l2tp_l2tun_session_updated = None
self.l2tp_l2tun_session_circuit_status = None
self.l2x_lpts_pa_stats_setup_cnt = None
self.l2x_lpts_pa_stats_destroy_cnt = None
self.l2x_lpts_pa_stats_alloc_cnt = None
self.l2x_lpts_pa_stats_alloc_fail_cnt = None
self.l2x_lpts_pa_stats_init_cnt = None
self.l2x_lpts_pa_stats_init_fail_cnt = None
self.l2x_lpts_pa_stats_free_cnt = None
self.l2x_lpts_pa_stats_pulse_cnt = None
self.l2x_lpts_pa_stats_pulse_fail_cnt = None
self.l2x_lpts_pa_stats_bind_cnt = None
self.l2x_lpts_pa_stats_bind_fail_cnt = None
self.l2x_lpts_pa_stats_bind_batch_cnt = None
self.l2x_lpts_pa_stats_bind_batch_fail_cnt = None
self.l2x_lpts_pa_stats_bind_time = None
self.l2x_lpts_pa_stats_expire_cnt = None
self.l2x_lpts_pa_stats_replay_cnt = None
self.l2x_lpts_pa_stats_replay_batch_cnt = None
self.l2x_lpts_pa_stats_replay_time = None
self._segment_path = lambda: "internal-stats-last-clear"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(L2tpv2.Nodes.Node.Internal.InternalStatsLastClear, ['l2tp_sh_l2x_num_tunnels', 'l2tp_sh_l2x_num_sessions', 'l2tp_sh_l2x_rx_high_water_mark', 'l2tp_sh_l2x_ave_msg_process_usecs', 'l2tp_sh_l2x_num_rx_msgs', 'l2tp_sh_l2x_num_tx_msgs', 'l2tp_sh_l2x_num_tx_err_drops', 'l2tp_sh_l2x_num_tx_conn_drops', 'l2tp_sh_l2x_num_reordered_msgs', 'l2tp_sh_l2x_max_reorder_deviation', 'l2tp_sh_l2x_num_ooo_msgs', 'l2tp_sh_l2x_num_rx_path_drops', 'l2tp_sh_l2x_num_rx_path_data_pkt_drops', 'l2tp_sh_l2x_num_rx_queue_drops', 'l2tp_sh_l2x_num_rx_ooo_drops', 'l2tp_sh_l2x_num_buffered_msgs', 'l2tp_sh_l2x_num_mutex_block', 'l2tp_sh_l2x_num_bad_len_drops', 'l2tp_sh_l2x_num_bad_avp_drops', 'l2tp_sh_l2x_num_missing_cc_id_drops', 'l2tp_sh_l2x_num_missing_sess_id_drops', 'l2tp_sh_l2x_num_mismatch_cc_id_drops', 'l2tp_sh_l2x_num_unknown_cc_drops', 'l2tp_sh_l2x_num_unknown_sess_drops', 'l2tp_sh_l2x_num_linear_id_search', 'l2tp_sh_l2x_num_linear_id_search_fail', 'l2tp_sh_l2x_num_netio_pkt_rx', 'l2tp_sh_l2tun_ave_msg_process_usecs', 'l2tp_sh_l2tun_num_rx_msgs', 'l2tp_sh_l2tun_num_tx_msgs', 'l2tp_l2tun_socket_ens_send_error_cnt', 'l2tp_l2tun_socket_session_accept', 'l2tp_l2tun_socket_session_destroy', 'l2tp_l2tun_socket_session_connect', 'l2tp_l2tun_socket_session_connect_continue', 'l2tp_l2tun_session_connecting', 'l2tp_l2tun_session_connected', 'l2tp_l2tun_session_disconnected', 'l2tp_l2tun_session_incoming', 'l2tp_l2tun_session_updated', 'l2tp_l2tun_session_circuit_status', 'l2x_lpts_pa_stats_setup_cnt', 'l2x_lpts_pa_stats_destroy_cnt', 'l2x_lpts_pa_stats_alloc_cnt', 'l2x_lpts_pa_stats_alloc_fail_cnt', 'l2x_lpts_pa_stats_init_cnt', 'l2x_lpts_pa_stats_init_fail_cnt', 'l2x_lpts_pa_stats_free_cnt', 'l2x_lpts_pa_stats_pulse_cnt', 'l2x_lpts_pa_stats_pulse_fail_cnt', 'l2x_lpts_pa_stats_bind_cnt', 'l2x_lpts_pa_stats_bind_fail_cnt', 'l2x_lpts_pa_stats_bind_batch_cnt', 'l2x_lpts_pa_stats_bind_batch_fail_cnt', 'l2x_lpts_pa_stats_bind_time', 'l2x_lpts_pa_stats_expire_cnt', 'l2x_lpts_pa_stats_replay_cnt', 'l2x_lpts_pa_stats_replay_batch_cnt', 'l2x_lpts_pa_stats_replay_time'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Internal.InternalStatsLastClear']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node.Internal']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes.Node']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2.Nodes']['meta_info']
def clone_ptr(self):
self._top_entity = L2tpv2()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2tpv2']['meta_info']
| 51.726315
| 2,121
| 0.365911
| 56,613
| 849,553
| 5.182308
| 0.00779
| 0.042892
| 0.068783
| 0.040854
| 0.984382
| 0.979587
| 0.977559
| 0.976297
| 0.972388
| 0.966222
| 0
| 0.046669
| 0.548428
| 849,553
| 16,423
| 2,122
| 51.729465
| 0.718085
| 0.213995
| 0
| 0.886382
| 0
| 0
| 0.209167
| 0.141747
| 0.000615
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0.015176
| 0.02338
| 0
| 0.133716
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4fed25f6edcec6a0dfcb652960971e997f896caf
| 112,275
|
py
|
Python
|
other-versions/test_synth.py
|
kris314/deep-text-recognition-benchmark
|
741dd9abc8b7b2f29ba088b308f0e8c1483153d9
|
[
"Apache-2.0"
] | null | null | null |
other-versions/test_synth.py
|
kris314/deep-text-recognition-benchmark
|
741dd9abc8b7b2f29ba088b308f0e8c1483153d9
|
[
"Apache-2.0"
] | null | null | null |
other-versions/test_synth.py
|
kris314/deep-text-recognition-benchmark
|
741dd9abc8b7b2f29ba088b308f0e8c1483153d9
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import string
import argparse
import re
import random
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
import torch.nn.functional as F
import numpy as np
from nltk.metrics.distance import edit_distance
from utils import CTCLabelConverter, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate, tensor2im, save_image
from model import Model, AdaINGen, MsImageDis
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
import pdb
def make_noise(batch, latent_dim, n_noise, device):
if n_noise == 1:
return torch.randn(batch, latent_dim, device=device)
noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)
return noises
def mixing_noise(batch, latent_dim, prob, device):
if prob > 0 and random.random() < prob:
return make_noise(batch, latent_dim, 2, device)
else:
return [make_noise(batch, latent_dim, 1, device)]
def g_nonsaturating_loss(fake_pred):
loss = F.softplus(-fake_pred).mean()
return loss
def d_logistic_loss(real_pred, fake_pred):
real_loss = F.softplus(-real_pred)
fake_loss = F.softplus(fake_pred)
return real_loss.mean() + fake_loss.mean()
def benchmark_all_eval(synthModel, ocrModel, recCriterion, styleRecCriterion, ocrCriterion, converter, opt, calculate_infer_time=False):
""" evaluation with 10 benchmark evaluation datasets """
# The evaluation datasets, dataset order is same with Table 1 in our paper.
eval_data_list = ['IIIT5k_3000', 'SVT', 'IC03_860', 'IC03_867', 'IC13_857',
'IC13_1015', 'IC15_1811', 'IC15_2077', 'SVTP', 'CUTE80']
if calculate_infer_time:
evaluation_batch_size = 1 # batch_size should be 1 to calculate the GPU inference time per image.
else:
evaluation_batch_size = opt.batch_size
list_accuracy = []
total_forward_time = 0
total_evaluation_data_number = 0
total_correct_number = 0
log = open(f'./result/{opt.exp_name}/log_all_evaluation.txt', 'a')
dashed_line = '-' * 80
print(dashed_line)
log.write(dashed_line + '\n')
for eval_data in eval_data_list:
eval_data_path = os.path.join(opt.eval_data, eval_data)
AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
eval_data, eval_data_log = hierarchical_dataset(root=eval_data_path, opt=opt)
evaluation_loader = torch.utils.data.DataLoader(
eval_data, batch_size=evaluation_batch_size,
shuffle=False,
num_workers=int(opt.workers),
collate_fn=AlignCollate_evaluation, pin_memory=True)
_, accuracy_by_best_model, norm_ED_by_best_model, _, _, _, infer_time, length_of_data = validation(
model, criterion, evaluation_loader, converter, opt)
list_accuracy.append(f'{accuracy_by_best_model:0.3f}')
total_forward_time += infer_time
total_evaluation_data_number += len(eval_data)
total_correct_number += accuracy_by_best_model * length_of_data
log.write(eval_data_log)
print(f'Acc {accuracy_by_best_model:0.3f}\t normalized_ED {norm_ED_by_best_model:0.3f}')
log.write(f'Acc {accuracy_by_best_model:0.3f}\t normalized_ED {norm_ED_by_best_model:0.3f}\n')
print(dashed_line)
log.write(dashed_line + '\n')
averaged_forward_time = total_forward_time / total_evaluation_data_number * 1000
total_accuracy = total_correct_number / total_evaluation_data_number
params_num = sum([np.prod(p.size()) for p in model.parameters()])
evaluation_log = 'accuracy: '
for name, accuracy in zip(eval_data_list, list_accuracy):
evaluation_log += f'{name}: {accuracy}\t'
evaluation_log += f'total_accuracy: {total_accuracy:0.3f}\t'
evaluation_log += f'averaged_infer_time: {averaged_forward_time:0.3f}\t# parameters: {params_num/1e6:0.3f}'
print(evaluation_log)
log.write(evaluation_log + '\n')
log.close()
return None
def validation(model, criterion, evaluation_loader, converter, opt):
""" validation or evaluation """
n_correct = 0
norm_ED = 0
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
for i, (image_tensors, labels) in enumerate(evaluation_loader):
batch_size = image_tensors.size(0)
length_of_data = length_of_data + batch_size
image = image_tensors.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss, length_for_loss = converter.encode(labels, batch_max_length=opt.batch_max_length)
start_time = time.time()
if 'CTC' in opt.Prediction:
preds = model(image, text_for_pred)
forward_time = time.time() - start_time
# Calculate evaluation loss for CTC deocder.
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
# permute 'preds' to use CTCloss format
cost = criterion(preds.log_softmax(2).permute(1, 0, 2), text_for_loss, preds_size, length_for_loss)
# Select max probabilty (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index.data, preds_size.data)
else:
preds = model(image, text_for_pred, is_train=False)
forward_time = time.time() - start_time
preds = preds[:, :text_for_loss.shape[1] - 1, :]
target = text_for_loss[:, 1:] # without [GO] Symbol
cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))
# select max probabilty (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
labels = converter.decode(text_for_loss[:, 1:], length_for_loss)
infer_time += forward_time
valid_loss_avg.add(cost)
# calculate accuracy & confidence score
preds_prob = F.softmax(preds, dim=2)
preds_max_prob, _ = preds_prob.max(dim=2)
confidence_score_list = []
for gt, pred, pred_max_prob in zip(labels, preds_str, preds_max_prob):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
# To evaluate 'case sensitive model' with alphanumeric and case insensitve setting.
if opt.sensitive and opt.data_filtering_off:
pred = pred.lower()
gt = gt.lower()
alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
if pred == gt:
n_correct += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt) == 0 or len(pred) == 0:
norm_ED += 0
elif len(gt) > len(pred):
norm_ED += 1 - edit_distance(pred, gt) / len(gt)
else:
norm_ED += 1 - edit_distance(pred, gt) / len(pred)
# calculate confidence score (= multiply of pred_max_prob)
try:
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
except:
confidence_score = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list.append(confidence_score)
# print(pred, gt, pred==gt, confidence_score)
accuracy = n_correct / float(length_of_data) * 100
norm_ED = norm_ED / float(length_of_data) # ICDAR2019 Normalized Edit Distance
return valid_loss_avg.val(), accuracy, norm_ED, preds_str, confidence_score_list, labels, infer_time, length_of_data
def validation_synth(iterCntr, synthModel, ocrModel, recCriterion, ocrCriterion, evaluation_loader, converter, opt):
""" validation or evaluation """
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr)), exist_ok=True)
n_correct_ocr = 0
norm_ED_ocr = 0
n_correct_1 = 0
norm_ED_1 = 0
n_correct_2 = 0
norm_ED_2 = 0
length_of_data = 0
infer_time = 0
valid_loss_avg_ocr = Averager()
valid_loss_avg = Averager()
lexicons=[]
out_of_char = f'[^{opt.character}]'
#read lexicons file
with open(opt.lexFile,'r') as lexF:
for line in lexF:
lexWord = line[:-1]
if len(lexWord) <= opt.batch_max_length and not(re.search(out_of_char, lexWord.lower())):
lexicons.append(lexWord)
for i, (image_tensors, labels_1) in enumerate(evaluation_loader):
# print(i)
if opt.debugFlag and i>2:
break
batch_size = image_tensors.size(0)
#generate lexicons
labels_2 = random.sample(lexicons, batch_size)
length_of_data = length_of_data + batch_size
image = image_tensors.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss_1, length_for_loss_1 = converter.encode(labels_1, batch_max_length=opt.batch_max_length)
text_for_loss_2, length_for_loss_2 = converter.encode(labels_2, batch_max_length=opt.batch_max_length)
start_time = time.time()
images_recon_1, images_recon_2,_ = synthModel(image, text_for_loss_1, text_for_loss_2)
#Save random reconstructed image and write its gt
rIdx = random.randint(0,batch_size-1)
try:
save_image(tensor2im(image[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_input_'+labels_1[rIdx]+'.png'))
save_image(tensor2im(images_recon_1[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_recon_'+labels_1[rIdx]+'.png'))
save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_pair_'+labels_2[rIdx]+'.png'))
except:
print('Warning while saving validation image')
if 'CTC' in opt.Prediction:
preds_ocr = ocrModel(image, text_for_pred)
preds_1 = ocrModel(images_recon_1, text_for_pred)
preds_2 = ocrModel(images_recon_2, text_for_pred)
forward_time = time.time() - start_time
# Calculate evaluation loss for CTC deocder.
preds_size_1 = torch.IntTensor([preds_1.size(1)] * batch_size)
preds_size_2 = torch.IntTensor([preds_2.size(1)] * batch_size)
# permute 'preds' to use CTCloss format
ocrCost_ocr = ocrCriterion(preds_ocr.log_softmax(2).permute(1, 0, 2), text_for_loss_1, preds_size_1, length_for_loss_1)
ocrCost_1 = ocrCriterion(preds_1.log_softmax(2).permute(1, 0, 2), text_for_loss_1, preds_size_1, length_for_loss_1)
ocrCost_2 = ocrCriterion(preds_2.log_softmax(2).permute(1, 0, 2), text_for_loss_2, preds_size_2, length_for_loss_2)
# Select max probabilty (greedy decoding) then decode index to character
_, preds_index_ocr = preds_ocr.max(2)
_, preds_index_1 = preds_1.max(2)
_, preds_index_2 = preds_2.max(2)
preds_str_ocr = converter.decode(preds_index_ocr.data, preds_size_1.data)
preds_str_1 = converter.decode(preds_index_1.data, preds_size_1.data)
preds_str_2 = converter.decode(preds_index_2.data, preds_size_2.data)
else:
preds = model(image, text_for_pred, is_train=False)
forward_time = time.time() - start_time
preds = preds[:, :text_for_loss.shape[1] - 1, :]
target = text_for_loss[:, 1:] # without [GO] Symbol
cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))
# select max probabilty (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
labels = converter.decode(text_for_loss[:, 1:], length_for_loss)
recCost = recCriterion(images_recon_1,image)
infer_time += forward_time
valid_loss_avg_ocr.add(ocrCost_ocr)
valid_loss_avg.add(ocrCost_1+ocrCost_2+recCost)
# calculate accuracy & confidence score
preds_prob_ocr = F.softmax(preds_ocr, dim=2)
preds_max_prob_ocr, _ = preds_prob_ocr.max(dim=2)
preds_prob_1 = F.softmax(preds_1, dim=2)
preds_max_prob_1, _ = preds_prob_1.max(dim=2)
preds_prob_2 = F.softmax(preds_2, dim=2)
preds_max_prob_2, _ = preds_prob_2.max(dim=2)
confidence_score_list_ocr = []
confidence_score_list_1 = []
confidence_score_list_2 = []
for gt_ocr, pred_ocr, pred_max_prob_ocr, gt_1, pred_1, pred_max_prob_1, gt_2, pred_2, pred_max_prob_2 in zip(labels_1, preds_str_ocr, preds_max_prob_ocr, labels_1, preds_str_1, preds_max_prob_1, labels_2, preds_str_2, preds_max_prob_2):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
# To evaluate 'case sensitive model' with alphanumeric and case insensitve setting.
if opt.sensitive and opt.data_filtering_off:
pred = pred.lower()
gt = gt.lower()
alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
if pred_ocr == gt_ocr:
n_correct_ocr += 1
if pred_1 == gt_1:
n_correct_1 += 1
if pred_2 == gt_2:
n_correct_2 += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt_1) == 0 or len(pred_1) == 0:
norm_ED_1 += 0
elif len(gt_1) > len(pred_1):
norm_ED_1 += 1 - edit_distance(pred_1, gt_1) / len(gt_1)
else:
norm_ED_1 += 1 - edit_distance(pred_1, gt_1) / len(pred_1)
# ICDAR2019 Normalized Edit Distance
if len(gt_2) == 0 or len(pred_2) == 0:
norm_ED_2 += 0
elif len(gt_2) > len(pred_2):
norm_ED_2 += 1 - edit_distance(pred_2, gt_2) / len(gt_2)
else:
norm_ED_2 += 1 - edit_distance(pred_2, gt_2) / len(pred_2)
# ICDAR2019 Normalized Edit Distance
if len(gt_ocr) == 0 or len(pred_ocr) == 0:
norm_ED_ocr += 0
elif len(gt_ocr) > len(pred_ocr):
norm_ED_ocr += 1 - edit_distance(pred_ocr, gt_ocr) / len(gt_ocr)
else:
norm_ED_ocr += 1 - edit_distance(pred_ocr, gt_ocr) / len(pred_ocr)
# calculate confidence score (= multiply of pred_max_prob)
try:
confidence_score_ocr = pred_max_prob_ocr.cumprod(dim=0)[-1]
confidence_score_1 = pred_max_prob_1.cumprod(dim=0)[-1]
confidence_score_2 = pred_max_prob_2.cumprod(dim=0)[-1]
except:
confidence_score_ocr = 0
confidence_score_1 = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_2 = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list_ocr.append(confidence_score_ocr)
confidence_score_list_1.append(confidence_score_1)
confidence_score_list_2.append(confidence_score_2)
# print(pred, gt, pred==gt, confidence_score)
accuracy_ocr = n_correct_ocr / float(length_of_data) * 100
norm_ED_ocr = norm_ED_ocr / float(length_of_data) # ICDAR2019 Normalized Edit Distance
accuracy_1 = n_correct_1 / float(length_of_data) * 100
norm_ED_1 = norm_ED_1 / float(length_of_data) # ICDAR2019 Normalized Edit Distance
accuracy_2 = n_correct_2 / float(length_of_data) * 100
norm_ED_2 = norm_ED_2 / float(length_of_data) # ICDAR2019 Normalized Edit Distance
return [valid_loss_avg_ocr.val(), valid_loss_avg.val()], [accuracy_ocr,accuracy_1,accuracy_2], [norm_ED_ocr,norm_ED_1,norm_ED_2], [preds_str_ocr, preds_str_1,preds_str_2], [confidence_score_list_ocr,confidence_score_list_1,confidence_score_list_2], [labels_1,labels_1,labels_2], infer_time, length_of_data
def validation_synth_adv(iterCntr, synthModel, ocrModel, disModel, recCriterion, ocrCriterion, evaluation_loader, converter, opt):
""" validation or evaluation """
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr)), exist_ok=True)
n_correct_ocr = 0
norm_ED_ocr = 0
n_correct_1 = 0
norm_ED_1 = 0
n_correct_2 = 0
norm_ED_2 = 0
length_of_data = 0
infer_time = 0
valid_loss_avg_ocr = Averager()
valid_loss_avg = Averager()
valid_loss_avg_dis = Averager()
lexicons=[]
out_of_char = f'[^{opt.character}]'
#read lexicons file
with open(opt.lexFile,'r') as lexF:
for line in lexF:
lexWord = line[:-1]
if len(lexWord) <= opt.batch_max_length and not(re.search(out_of_char, lexWord.lower())):
lexicons.append(lexWord)
for i, (image_tensors_all, labels_1_all) in enumerate(evaluation_loader):
# print(i)
if opt.debugFlag and i>2:
break
disCnt = int(image_tensors_all.size(0)/2)
image_tensors, image_tensors_real, labels_1 = image_tensors_all[:disCnt], image_tensors_all[disCnt:disCnt+disCnt], labels_1_all[:disCnt]
batch_size = image_tensors.size(0)
#generate lexicons
labels_2 = random.sample(lexicons, batch_size)
length_of_data = length_of_data + batch_size
image = image_tensors.to(device)
image_real = image_tensors_real.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss_1, length_for_loss_1 = converter.encode(labels_1, batch_max_length=opt.batch_max_length)
text_for_loss_2, length_for_loss_2 = converter.encode(labels_2, batch_max_length=opt.batch_max_length)
start_time = time.time()
images_recon_1, images_recon_2, _ = synthModel(image, text_for_loss_1, text_for_loss_2)
#Save random reconstructed image and write its gt
rIdx = random.randint(0,batch_size-1)
try:
save_image(tensor2im(image[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_input_'+labels_1[rIdx]+'.png'))
save_image(tensor2im(images_recon_1[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_recon_'+labels_1[rIdx]+'.png'))
save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_pair_'+labels_2[rIdx]+'.png'))
except:
print('Warning while saving validation image')
if 'CTC' in opt.Prediction:
preds_ocr = ocrModel(image, text_for_pred)
preds_1 = ocrModel(images_recon_1, text_for_pred)
preds_2 = ocrModel(images_recon_2, text_for_pred)
forward_time = time.time() - start_time
# Calculate evaluation loss for CTC deocder.
preds_size_1 = torch.IntTensor([preds_1.size(1)] * batch_size)
preds_size_2 = torch.IntTensor([preds_2.size(1)] * batch_size)
# permute 'preds' to use CTCloss format
ocrCost_ocr = ocrCriterion(preds_ocr.log_softmax(2).permute(1, 0, 2), text_for_loss_1, preds_size_1, length_for_loss_1)
ocrCost_1 = ocrCriterion(preds_1.log_softmax(2).permute(1, 0, 2), text_for_loss_1, preds_size_1, length_for_loss_1)
ocrCost_2 = ocrCriterion(preds_2.log_softmax(2).permute(1, 0, 2), text_for_loss_2, preds_size_2, length_for_loss_2)
# Select max probabilty (greedy decoding) then decode index to character
_, preds_index_ocr = preds_ocr.max(2)
_, preds_index_1 = preds_1.max(2)
_, preds_index_2 = preds_2.max(2)
preds_str_ocr = converter.decode(preds_index_ocr.data, preds_size_1.data)
preds_str_1 = converter.decode(preds_index_1.data, preds_size_1.data)
preds_str_2 = converter.decode(preds_index_2.data, preds_size_2.data)
disCost = 0.5*(disModel.module.calc_dis_loss(images_recon_1.detach(), image_real) + disModel.module.calc_dis_loss(images_recon_2.detach(), image))
disGenCost = 0.5*(disModel.module.calc_gen_loss(images_recon_1)+disModel.module.calc_gen_loss(images_recon_2))
else:
preds = model(image, text_for_pred, is_train=False)
forward_time = time.time() - start_time
preds = preds[:, :text_for_loss.shape[1] - 1, :]
target = text_for_loss[:, 1:] # without [GO] Symbol
cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))
# select max probabilty (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
labels = converter.decode(text_for_loss[:, 1:], length_for_loss)
recCost = recCriterion(images_recon_1,image)
infer_time += forward_time
valid_loss_avg_ocr.add(ocrCost_ocr)
valid_loss_avg.add(opt.ocrWeight*(0.5*(ocrCost_1+ocrCost_2))+opt.reconWeight*recCost+opt.disWeight*disGenCost)
valid_loss_avg_dis.add(opt.disWeight*disCost)
# calculate accuracy & confidence score
preds_prob_ocr = F.softmax(preds_ocr, dim=2)
preds_max_prob_ocr, _ = preds_prob_ocr.max(dim=2)
preds_prob_1 = F.softmax(preds_1, dim=2)
preds_max_prob_1, _ = preds_prob_1.max(dim=2)
preds_prob_2 = F.softmax(preds_2, dim=2)
preds_max_prob_2, _ = preds_prob_2.max(dim=2)
confidence_score_list_ocr = []
confidence_score_list_1 = []
confidence_score_list_2 = []
for gt_ocr, pred_ocr, pred_max_prob_ocr, gt_1, pred_1, pred_max_prob_1, gt_2, pred_2, pred_max_prob_2 in zip(labels_1, preds_str_ocr, preds_max_prob_ocr, labels_1, preds_str_1, preds_max_prob_1, labels_2, preds_str_2, preds_max_prob_2):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
# To evaluate 'case sensitive model' with alphanumeric and case insensitve setting.
if opt.sensitive and opt.data_filtering_off:
pred = pred.lower()
gt = gt.lower()
alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
if pred_ocr == gt_ocr:
n_correct_ocr += 1
if pred_1 == gt_1:
n_correct_1 += 1
if pred_2 == gt_2:
n_correct_2 += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt_1) == 0 or len(pred_1) == 0:
norm_ED_1 += 0
elif len(gt_1) > len(pred_1):
norm_ED_1 += 1 - edit_distance(pred_1, gt_1) / len(gt_1)
else:
norm_ED_1 += 1 - edit_distance(pred_1, gt_1) / len(pred_1)
# ICDAR2019 Normalized Edit Distance
if len(gt_2) == 0 or len(pred_2) == 0:
norm_ED_2 += 0
elif len(gt_2) > len(pred_2):
norm_ED_2 += 1 - edit_distance(pred_2, gt_2) / len(gt_2)
else:
norm_ED_2 += 1 - edit_distance(pred_2, gt_2) / len(pred_2)
# ICDAR2019 Normalized Edit Distance
if len(gt_ocr) == 0 or len(pred_ocr) == 0:
norm_ED_ocr += 0
elif len(gt_ocr) > len(pred_ocr):
norm_ED_ocr += 1 - edit_distance(pred_ocr, gt_ocr) / len(gt_ocr)
else:
norm_ED_ocr += 1 - edit_distance(pred_ocr, gt_ocr) / len(pred_ocr)
# calculate confidence score (= multiply of pred_max_prob)
try:
confidence_score_ocr = pred_max_prob_ocr.cumprod(dim=0)[-1]
confidence_score_1 = pred_max_prob_1.cumprod(dim=0)[-1]
confidence_score_2 = pred_max_prob_2.cumprod(dim=0)[-1]
except:
confidence_score_ocr = 0
confidence_score_1 = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_2 = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list_ocr.append(confidence_score_ocr)
confidence_score_list_1.append(confidence_score_1)
confidence_score_list_2.append(confidence_score_2)
# print(pred, gt, pred==gt, confidence_score)
accuracy_ocr = n_correct_ocr / float(length_of_data) * 100
norm_ED_ocr = norm_ED_ocr / float(length_of_data) # ICDAR2019 Normalized Edit Distance
accuracy_1 = n_correct_1 / float(length_of_data) * 100
norm_ED_1 = norm_ED_1 / float(length_of_data) # ICDAR2019 Normalized Edit Distance
accuracy_2 = n_correct_2 / float(length_of_data) * 100
norm_ED_2 = norm_ED_2 / float(length_of_data) # ICDAR2019 Normalized Edit Distance
return [valid_loss_avg_ocr.val(), valid_loss_avg.val(), valid_loss_avg_dis.val()], [accuracy_ocr,accuracy_1,accuracy_2], [norm_ED_ocr,norm_ED_1,norm_ED_2], [preds_str_ocr, preds_str_1,preds_str_2], [confidence_score_list_ocr,confidence_score_list_1,confidence_score_list_2], [labels_1,labels_1,labels_2], infer_time, length_of_data
def validation_synth_lrw(iterCntr, synthModel, ocrModel, disModel, recCriterion, styleRecCriterion, ocrCriterion, evaluation_loader, converter, opt):
""" validation or evaluation """
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr)), exist_ok=True)
n_correct_ocr = 0
norm_ED_ocr = 0
n_correct_1 = 0
norm_ED_1 = 0
n_correct_2 = 0
norm_ED_2 = 0
length_of_data = 0
infer_time = 0
valid_loss_avg_ocr = Averager()
valid_loss_avg = Averager()
valid_loss_avg_dis = Averager()
lexicons=[]
out_of_char = f'[^{opt.character}]'
#read lexicons file
with open(opt.lexFile,'r') as lexF:
for line in lexF:
lexWord = line[:-1]
if len(lexWord) <= opt.batch_max_length and not(re.search(out_of_char, lexWord.lower())):
lexicons.append(lexWord)
for i, (image_tensors_all, labels_1_all) in enumerate(evaluation_loader):
# print(i)
if opt.debugFlag and i>2:
break
disCnt = int(image_tensors_all.size(0)/2)
image_tensors, image_tensors_real, labels_1 = image_tensors_all[:disCnt], image_tensors_all[disCnt:disCnt+disCnt], labels_1_all[:disCnt]
batch_size = image_tensors.size(0)
#generate lexicons
labels_2 = random.sample(lexicons, batch_size)
length_of_data = length_of_data + batch_size
image = image_tensors.to(device)
image_real = image_tensors_real.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss_1, length_for_loss_1 = converter.encode(labels_1, batch_max_length=opt.batch_max_length)
text_for_loss_2, length_for_loss_2 = converter.encode(labels_2, batch_max_length=opt.batch_max_length)
start_time = time.time()
images_recon_1, images_recon_2, style = synthModel(image, text_for_loss_1, text_for_loss_2)
#Save random reconstructed image and write its gt
rIdx = random.randint(0,batch_size-1)
try:
save_image(tensor2im(image[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_input_'+labels_1[rIdx]+'.png'))
save_image(tensor2im(images_recon_1[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_recon_'+labels_1[rIdx]+'.png'))
save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_pair_'+labels_2[rIdx]+'.png'))
except:
print('Warning while saving validation image')
if 'CTC' in opt.Prediction:
preds_ocr = ocrModel(image, text_for_pred)
preds_1 = ocrModel(images_recon_1, text_for_pred)
preds_2 = ocrModel(images_recon_2, text_for_pred)
forward_time = time.time() - start_time
# Calculate evaluation loss for CTC deocder.
preds_size_1 = torch.IntTensor([preds_1.size(1)] * batch_size)
preds_size_2 = torch.IntTensor([preds_2.size(1)] * batch_size)
# permute 'preds' to use CTCloss format
ocrCost_ocr = ocrCriterion(preds_ocr.log_softmax(2).permute(1, 0, 2), text_for_loss_1, preds_size_1, length_for_loss_1)
ocrCost_1 = ocrCriterion(preds_1.log_softmax(2).permute(1, 0, 2), text_for_loss_1, preds_size_1, length_for_loss_1)
ocrCost_2 = ocrCriterion(preds_2.log_softmax(2).permute(1, 0, 2), text_for_loss_2, preds_size_2, length_for_loss_2)
# Select max probabilty (greedy decoding) then decode index to character
_, preds_index_ocr = preds_ocr.max(2)
_, preds_index_1 = preds_1.max(2)
_, preds_index_2 = preds_2.max(2)
preds_str_ocr = converter.decode(preds_index_ocr.data, preds_size_1.data)
preds_str_1 = converter.decode(preds_index_1.data, preds_size_1.data)
preds_str_2 = converter.decode(preds_index_2.data, preds_size_2.data)
disCost = 0.5*(disModel.module.calc_dis_loss(images_recon_1.detach(), image_real) + disModel.module.calc_dis_loss(images_recon_2.detach(), image))
disGenCost = 0.5*(disModel.module.calc_gen_loss(images_recon_1)+disModel.module.calc_gen_loss(images_recon_2))
else:
preds = model(image, text_for_pred, is_train=False)
forward_time = time.time() - start_time
preds = preds[:, :text_for_loss.shape[1] - 1, :]
target = text_for_loss[:, 1:] # without [GO] Symbol
cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))
# select max probabilty (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
labels = converter.decode(text_for_loss[:, 1:], length_for_loss)
recCost = recCriterion(images_recon_1,image)
styleRecCost = styleRecCriterion(synthModel(images_recon_2, None, None, styleFlag=True), style.detach())
infer_time += forward_time
valid_loss_avg_ocr.add(ocrCost_ocr)
valid_loss_avg.add(opt.ocrWeight*(0.5*(ocrCost_1+ocrCost_2))+opt.reconWeight*recCost+opt.disWeight*disGenCost+opt.styleReconWeight*styleRecCost)
valid_loss_avg_dis.add(opt.disWeight*disCost)
# calculate accuracy & confidence score
preds_prob_ocr = F.softmax(preds_ocr, dim=2)
preds_max_prob_ocr, _ = preds_prob_ocr.max(dim=2)
preds_prob_1 = F.softmax(preds_1, dim=2)
preds_max_prob_1, _ = preds_prob_1.max(dim=2)
preds_prob_2 = F.softmax(preds_2, dim=2)
preds_max_prob_2, _ = preds_prob_2.max(dim=2)
confidence_score_list_ocr = []
confidence_score_list_1 = []
confidence_score_list_2 = []
for gt_ocr, pred_ocr, pred_max_prob_ocr, gt_1, pred_1, pred_max_prob_1, gt_2, pred_2, pred_max_prob_2 in zip(labels_1, preds_str_ocr, preds_max_prob_ocr, labels_1, preds_str_1, preds_max_prob_1, labels_2, preds_str_2, preds_max_prob_2):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
# To evaluate 'case sensitive model' with alphanumeric and case insensitve setting.
if opt.sensitive and opt.data_filtering_off:
pred = pred.lower()
gt = gt.lower()
alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
if pred_ocr == gt_ocr:
n_correct_ocr += 1
if pred_1 == gt_1:
n_correct_1 += 1
if pred_2 == gt_2:
n_correct_2 += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt_1) == 0 or len(pred_1) == 0:
norm_ED_1 += 0
elif len(gt_1) > len(pred_1):
norm_ED_1 += 1 - edit_distance(pred_1, gt_1) / len(gt_1)
else:
norm_ED_1 += 1 - edit_distance(pred_1, gt_1) / len(pred_1)
# ICDAR2019 Normalized Edit Distance
if len(gt_2) == 0 or len(pred_2) == 0:
norm_ED_2 += 0
elif len(gt_2) > len(pred_2):
norm_ED_2 += 1 - edit_distance(pred_2, gt_2) / len(gt_2)
else:
norm_ED_2 += 1 - edit_distance(pred_2, gt_2) / len(pred_2)
# ICDAR2019 Normalized Edit Distance
if len(gt_ocr) == 0 or len(pred_ocr) == 0:
norm_ED_ocr += 0
elif len(gt_ocr) > len(pred_ocr):
norm_ED_ocr += 1 - edit_distance(pred_ocr, gt_ocr) / len(gt_ocr)
else:
norm_ED_ocr += 1 - edit_distance(pred_ocr, gt_ocr) / len(pred_ocr)
# calculate confidence score (= multiply of pred_max_prob)
try:
confidence_score_ocr = pred_max_prob_ocr.cumprod(dim=0)[-1]
confidence_score_1 = pred_max_prob_1.cumprod(dim=0)[-1]
confidence_score_2 = pred_max_prob_2.cumprod(dim=0)[-1]
except:
confidence_score_ocr = 0
confidence_score_1 = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_2 = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list_ocr.append(confidence_score_ocr)
confidence_score_list_1.append(confidence_score_1)
confidence_score_list_2.append(confidence_score_2)
# print(pred, gt, pred==gt, confidence_score)
accuracy_ocr = n_correct_ocr / float(length_of_data) * 100
norm_ED_ocr = norm_ED_ocr / float(length_of_data) # ICDAR2019 Normalized Edit Distance
accuracy_1 = n_correct_1 / float(length_of_data) * 100
norm_ED_1 = norm_ED_1 / float(length_of_data) # ICDAR2019 Normalized Edit Distance
accuracy_2 = n_correct_2 / float(length_of_data) * 100
norm_ED_2 = norm_ED_2 / float(length_of_data) # ICDAR2019 Normalized Edit Distance
return [valid_loss_avg_ocr.val(), valid_loss_avg.val(), valid_loss_avg_dis.val()], [accuracy_ocr,accuracy_1,accuracy_2], [norm_ED_ocr,norm_ED_1,norm_ED_2], [preds_str_ocr, preds_str_1,preds_str_2], [confidence_score_list_ocr,confidence_score_list_1,confidence_score_list_2], [labels_1,labels_1,labels_2], infer_time, length_of_data
def validation_synth_lrw_res(iterCntr, synthModel, ocrModel, disModel, recCriterion, styleRecCriterion, ocrCriterion, evaluation_loader, converter, opt):
""" validation or evaluation """
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr)), exist_ok=True)
random.seed(1024)
n_correct_ocr = 0
norm_ED_ocr = 0
n_correct_1 = 0
norm_ED_1 = 0
n_correct_2 = 0
norm_ED_2 = 0
length_of_data = 0
infer_time = 0
valid_loss_avg_ocr = Averager()
valid_loss_avg = Averager()
valid_loss_avg_dis = Averager()
valid_loss_avg_ocrRecon_1 = Averager()
valid_loss_avg_ocrRecon_2 = Averager()
valid_loss_avg_gen = Averager()
valid_loss_avg_imgRecon = Averager()
valid_loss_avg_styRecon = Averager()
lexicons=[]
out_of_char = f'[^{opt.character}]'
#read lexicons file
with open(opt.lexFile,'r') as lexF:
for line in lexF:
lexWord = line[:-1]
if opt.fixedString and len(lexWord)!=opt.batch_exact_length:
continue
if len(lexWord) <= opt.batch_max_length and not(re.search(out_of_char, lexWord.lower())) and len(lexWord) >= opt.batch_min_length:
lexicons.append(lexWord)
for i, (image_tensors_all, labels_1_all) in enumerate(evaluation_loader):
# print(i)
if opt.debugFlag and i>0:
break
disCnt = int(image_tensors_all.size(0)/2)
image_tensors, image_tensors_real, labels_gt = image_tensors_all[:disCnt], image_tensors_all[disCnt:disCnt+disCnt], labels_1_all[:disCnt]
image = image_tensors.to(device)
image_real = image_tensors_real.to(device)
batch_size = image_tensors.size(0)
##-----------------------------------##
#generate text(labels) from ocr.forward
if opt.ocrFixed:
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
if 'CTC' in opt.Prediction:
preds = ocrModel(image, text_for_pred)
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
_, preds_index = preds.max(2)
labels_1 = converter.decode(preds_index.data, preds_size.data)
else:
preds = ocrModel(image, text_for_pred, is_train=False)
_, preds_index = preds.max(2)
labels_1 = converter.decode(preds_index, length_for_pred)
for idx, pred in enumerate(labels_1):
pred_EOS = pred.find('[s]')
labels_1[idx] = pred[:pred_EOS] # prune after "end of sentence" token ([s])
else:
labels_1 = labels_gt
##-----------------------------------##
#generate lexicon labels
labels_2 = random.sample(lexicons, batch_size)
length_of_data = length_of_data + batch_size
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss_ocr, length_for_loss_ocr = converter.encode(labels_gt, batch_max_length=opt.batch_max_length)
text_for_loss_1, length_for_loss_1 = converter.encode(labels_1, batch_max_length=opt.batch_max_length)
text_for_loss_2, length_for_loss_2 = converter.encode(labels_2, batch_max_length=opt.batch_max_length)
start_time = time.time()
if image.shape[0] == 0:
continue
images_recon_1, images_recon_2, style = synthModel(image, text_for_loss_1, text_for_loss_2)
# #Save random reconstructed image and write its gt
# rIdx = random.randint(0,batch_size-1)
# try:
# save_image(tensor2im(image[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_input_'+labels_gt[rIdx]+'.png'))
# save_image(tensor2im(images_recon_1[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_recon_'+labels_1[rIdx]+'.png'))
# save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_pair_'+labels_2[rIdx]+'.png'))
# except:
# print('Warning while saving validation image')
if 'CTC' in opt.Prediction:
# if not opt.ocrFixed:
#ocr evaluations with orig image
preds_ocr = ocrModel(image, text_for_pred)
preds_size_ocr = torch.IntTensor([preds_ocr.size(1)] * batch_size)
ocrCost_ocr = ocrCriterion(preds_ocr.log_softmax(2).permute(1, 0, 2), text_for_loss_ocr, preds_size_ocr, length_for_loss_ocr)
_, preds_index_ocr = preds_ocr.max(2)
preds_str_ocr = converter.decode(preds_index_ocr.data, preds_size_ocr.data)
#content loss for reconstructed images
# permute 'preds' to use CTCloss format
preds_1 = ocrModel(images_recon_1, text_for_pred)
preds_size_1 = torch.IntTensor([preds_1.size(1)] * batch_size)
ocrCost_1 = ocrCriterion(preds_1.log_softmax(2).permute(1, 0, 2), text_for_loss_1, preds_size_1, length_for_loss_1)
_, preds_index_1 = preds_1.max(2)
preds_str_1 = converter.decode(preds_index_1.data, preds_size_1.data)
preds_2 = ocrModel(images_recon_2, text_for_pred)
preds_size_2 = torch.IntTensor([preds_2.size(1)] * batch_size)
ocrCost_2 = ocrCriterion(preds_2.log_softmax(2).permute(1, 0, 2), text_for_loss_2, preds_size_2, length_for_loss_2)
_, preds_index_2 = preds_2.max(2)
preds_str_2 = converter.decode(preds_index_2.data, preds_size_2.data)
else:
# if not opt.ocrFixed:
#ocr evaluations with orig image
preds_ocr = ocrModel(image, text_for_pred, is_train=False)
preds_ocr = preds_ocr[:, :text_for_loss_ocr.shape[1] - 1, :]
target_ocr = text_for_loss_ocr[:, 1:] # without [GO] Symbol
ocrCost_ocr = ocrCriterion(preds_ocr.contiguous().view(-1, preds_ocr.shape[-1]), target_ocr.contiguous().view(-1))
_, preds_index = preds_ocr.max(2)
preds_str_ocr = converter.decode(preds_index, length_for_pred)
# labels_1 = converter.decode(text_for_loss_1[:, 1:], length_for_loss_1)
# else:
# ocrCost_ocr = torch.tensor(0.0)
#ocr evaluations with orig image
preds_1 = ocrModel(images_recon_1, text_for_pred, is_train=False)
preds_1 = preds_1[:, :text_for_loss_1.shape[1] - 1, :]
target_1 = text_for_loss_1[:, 1:] # without [GO] Symbol
ocrCost_1 = ocrCriterion(preds_1.contiguous().view(-1, preds_1.shape[-1]), target_1.contiguous().view(-1))
_, preds_index_1 = preds_1.max(2)
preds_str_1 = converter.decode(preds_index_1, length_for_pred)
preds_2 = ocrModel(images_recon_2, text_for_pred, is_train=False)
preds_2 = preds_2[:, :text_for_loss_2.shape[1] - 1, :]
target_2 = text_for_loss_2[:, 1:] # without [GO] Symbol
ocrCost_2 = ocrCriterion(preds_2.contiguous().view(-1, preds_2.shape[-1]), target_2.contiguous().view(-1))
_, preds_index_2 = preds_2.max(2)
preds_str_2 = converter.decode(preds_index_2, length_for_pred)
forward_time = time.time() - start_time
if disModel == None:
disCost = torch.tensor(0.0)
disGenCost = torch.tensor(0.0)
else:
if opt.gan_type == 'wgan':
disCost = torch.tensor(0.0)
else:
disCost = 0.5*(disModel.module.calc_dis_loss(images_recon_1.detach(), image_real) + disModel.module.calc_dis_loss(images_recon_2.detach(), image))
disGenCost = 0.5*(disModel.module.calc_gen_loss(images_recon_1)+disModel.module.calc_gen_loss(images_recon_2))
if opt.imgReconLoss == 'ssim':
recCost = -1*recCriterion(images_recon_1,image, val_range=2)
elif opt.imgReconLoss == 'ms-ssim':
recCost = -1*recCriterion(images_recon_1,image, val_range=2, normalize='relu')
else:
recCost = recCriterion(images_recon_1,image)
if opt.styleReconWeight == 0.0:
styleRecCost = torch.tensor(0.0)
else:
styleRecCost = styleRecCriterion(synthModel(images_recon_2, None, None, styleFlag=True), style)
infer_time += forward_time
valid_loss_avg_ocr.add(ocrCost_ocr)
valid_loss_avg.add(opt.ocrWeight*(0.5*(opt.ocrWeight_1*ocrCost_1+opt.ocrWeight_2*ocrCost_2))+opt.reconWeight*recCost+opt.disWeight*disGenCost+opt.styleReconWeight*styleRecCost)
valid_loss_avg_dis.add(opt.disWeight*disCost)
#fine grained losses
valid_loss_avg_ocrRecon_1.add(opt.ocrWeight*(0.5*(opt.ocrWeight_1*ocrCost_1)))
valid_loss_avg_ocrRecon_2.add(opt.ocrWeight*(0.5*(opt.ocrWeight_2*ocrCost_2)))
valid_loss_avg_gen.add(opt.disWeight*disGenCost)
valid_loss_avg_imgRecon.add(opt.reconWeight*recCost)
valid_loss_avg_styRecon.add(opt.styleReconWeight*styleRecCost)
# if not opt.ocrFixed:
# calculate accuracy & confidence score
preds_prob_ocr = F.softmax(preds_ocr, dim=2)
preds_max_prob_ocr, _ = preds_prob_ocr.max(dim=2)
preds_prob_1 = F.softmax(preds_1, dim=2)
preds_max_prob_1, _ = preds_prob_1.max(dim=2)
preds_prob_2 = F.softmax(preds_2, dim=2)
preds_max_prob_2, _ = preds_prob_2.max(dim=2)
confidence_score_list_ocr = []
confidence_score_list_1 = []
confidence_score_list_2 = []
# zCntr=0
for gt_ocr, pred_ocr, pred_max_prob_ocr, gt_1, pred_1, pred_max_prob_1, gt_2, pred_2, pred_max_prob_2 in zip(labels_gt, preds_str_ocr, preds_max_prob_ocr, labels_1, preds_str_1, preds_max_prob_1, labels_2, preds_str_2, preds_max_prob_2):
if 'Attn' in opt.Prediction:
# if not opt.ocrFixed:
# gt_ocr = gt_ocr[:gt_ocr.find('[s]')]
pred_EOS = pred_ocr.find('[s]')
pred_ocr = pred_ocr[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob_ocr = pred_max_prob_ocr[:pred_EOS]
# gt_1 = gt_1[:gt_1.find('[s]')]
pred_EOS = pred_1.find('[s]')
pred_1 = pred_1[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob_1 = pred_max_prob_1[:pred_EOS]
# gt_2 = gt_2[:gt_2.find('[s]')]
pred_EOS = pred_2.find('[s]')
pred_2 = pred_2[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob_2 = pred_max_prob_2[:pred_EOS]
# # To evaluate 'case sensitive model' with alphanumeric and case insensitve setting.
# if opt.sensitive and opt.data_filtering_off:
# pred = pred.lower()
# gt = gt.lower()
# alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
# out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
# pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
# gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
if pred_ocr == gt_ocr:
n_correct_ocr += 1
# else:
# n_correct_ocr=0
if pred_1 == gt_1:
n_correct_1 += 1
if pred_2 == gt_2:
n_correct_2 += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt_1) == 0 or len(pred_1) == 0:
norm_ED_1 += 0
elif len(gt_1) > len(pred_1):
norm_ED_1 += 1 - edit_distance(pred_1, gt_1) / len(gt_1)
else:
norm_ED_1 += 1 - edit_distance(pred_1, gt_1) / len(pred_1)
# ICDAR2019 Normalized Edit Distance
if len(gt_2) == 0 or len(pred_2) == 0:
norm_ED_2 += 0
elif len(gt_2) > len(pred_2):
norm_ED_2 += 1 - edit_distance(pred_2, gt_2) / len(gt_2)
else:
norm_ED_2 += 1 - edit_distance(pred_2, gt_2) / len(pred_2)
# if not opt.ocrFixed:
# ICDAR2019 Normalized Edit Distance
if len(gt_ocr) == 0 or len(pred_ocr) == 0:
norm_ED_ocr += 0
elif len(gt_ocr) > len(pred_ocr):
norm_ED_ocr += 1 - edit_distance(pred_ocr, gt_ocr) / len(gt_ocr)
else:
norm_ED_ocr += 1 - edit_distance(pred_ocr, gt_ocr) / len(pred_ocr)
# else:
# norm_ED_ocr=0
# calculate confidence score (= multiply of pred_max_prob)
try:
# if not opt.ocrFixed:
confidence_score_ocr = pred_max_prob_ocr.cumprod(dim=0)[-1]
# else:
# confidence_score_ocr = 1.0
confidence_score_1 = pred_max_prob_1.cumprod(dim=0)[-1]
confidence_score_2 = pred_max_prob_2.cumprod(dim=0)[-1]
except:
confidence_score_ocr = 0
confidence_score_1 = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_2 = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list_ocr.append(confidence_score_ocr)
confidence_score_list_1.append(confidence_score_1)
confidence_score_list_2.append(confidence_score_2)
# print(pred, gt, pred==gt, confidence_score)
# zCntr+=1
#Save random reconstructed image and write its gt
if opt.testFlag:
randomSaveIdx = list(range(0,batch_size))
else:
randomSaveIdx = [random.randint(0,batch_size-1)]
for rIdx in randomSaveIdx:
if 'Attn' in opt.Prediction:
r_pred_EOS = preds_str_ocr[rIdx].find('[s]')
r_pred_ocr = preds_str_ocr[rIdx][:r_pred_EOS]
r_pred_1_EOS = preds_str_1[rIdx].find('[s]')
r_pred_1 = preds_str_1[rIdx][:r_pred_1_EOS]
r_pred_2_EOS = preds_str_2[rIdx].find('[s]')
r_pred_2 = preds_str_2[rIdx][:r_pred_2_EOS]
else:
r_pred_ocr = preds_str_ocr[rIdx]
r_pred_1 = preds_str_1[rIdx]
r_pred_2 = preds_str_2[rIdx]
try:
save_image(tensor2im(image[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_input_'+labels_gt[rIdx]+'_'+r_pred_ocr+'.png'))
save_image(tensor2im(images_recon_1[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_recon_'+labels_1[rIdx]+'_'+r_pred_1+'.png'))
save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_pair_'+labels_2[rIdx]+'_'+r_pred_2+'.png'))
except:
print('Warning while saving validation image')
accuracy_ocr = n_correct_ocr / float(length_of_data) * 100
norm_ED_ocr = norm_ED_ocr / float(length_of_data) # ICDAR2019 Normalized Edit Distance
accuracy_1 = n_correct_1 / float(length_of_data) * 100
norm_ED_1 = norm_ED_1 / float(length_of_data) # ICDAR2019 Normalized Edit Distance
accuracy_2 = n_correct_2 / float(length_of_data) * 100
norm_ED_2 = norm_ED_2 / float(length_of_data) # ICDAR2019 Normalized Edit Distance
random.seed()
return [valid_loss_avg_ocr.val(), valid_loss_avg.val(), valid_loss_avg_dis.val(), valid_loss_avg_ocrRecon_1.val(),valid_loss_avg_ocrRecon_2.val(), valid_loss_avg_gen.val(), valid_loss_avg_imgRecon.val(), valid_loss_avg_styRecon.val()], [accuracy_ocr,accuracy_1,accuracy_2], [norm_ED_ocr,norm_ED_1,norm_ED_2], [preds_str_ocr, preds_str_1,preds_str_2], [confidence_score_list_ocr,confidence_score_list_1,confidence_score_list_2], [labels_gt,labels_1,labels_2], infer_time, length_of_data
def validation_synth_v2(iterCntr, synthModel, ocrModel, disModel, recCriterion, styleRecCriterion, ocrCriterion, evaluation_loader, converter, opt):
""" validation or evaluation """
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr)), exist_ok=True)
random.seed(1024)
n_correct_ocr = 0
norm_ED_ocr = 0
n_correct_1 = 0
norm_ED_1 = 0
n_correct_2 = 0
norm_ED_2 = 0
length_of_data = 0
infer_time = 0
valid_loss_avg_ocr = Averager()
valid_loss_avg = Averager()
valid_loss_avg_dis = Averager()
valid_loss_avg_ocrRecon_1 = Averager()
valid_loss_avg_ocrRecon_2 = Averager()
valid_loss_avg_gen = Averager()
valid_loss_avg_imgRecon = Averager()
valid_loss_avg_styRecon = Averager()
lexicons=[]
out_of_char = f'[^{opt.character}]'
#read lexicons file
with open(opt.lexFile,'r') as lexF:
for line in lexF:
lexWord = line[:-1]
if opt.fixedString and len(lexWord)!=opt.batch_exact_length:
continue
if len(lexWord) <= opt.batch_max_length and not(re.search(out_of_char, lexWord.lower())) and len(lexWord) >= opt.batch_min_length:
lexicons.append(lexWord)
for i, (image_input_tensors, image_gt_tensors, labels_1, labels_2) in enumerate(evaluation_loader):
# print(i)
if opt.debugFlag and i>0:
break
# disCnt = int(image_tensors_all.size(0)/2)
labels_gt = labels_1
image_input_tensors = image_input_tensors.to(device)
image_gt_tensors = image_gt_tensors.to(device)
batch_size = image_input_tensors.size(0)
##-----------------------------------##
#generate text(labels) from ocr.forward
if opt.ocrFixed:
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
if 'CTC' in opt.Prediction:
preds = ocrModel(image_input_tensors, text_for_pred)
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
_, preds_index = preds.max(2)
labels_1 = converter.decode(preds_index.data, preds_size.data)
else:
preds = ocrModel(image_input_tensors, text_for_pred, is_train=False)
_, preds_index = preds.max(2)
labels_1 = converter.decode(preds_index, length_for_pred)
for idx, pred in enumerate(labels_1):
pred_EOS = pred.find('[s]')
labels_1[idx] = pred[:pred_EOS] # prune after "end of sentence" token ([s])
##-----------------------------------##
length_of_data = length_of_data + batch_size
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss_ocr, length_for_loss_ocr = converter.encode(labels_gt, batch_max_length=opt.batch_max_length)
text_for_loss_1, length_for_loss_1 = converter.encode(labels_1, batch_max_length=opt.batch_max_length)
text_for_loss_2, length_for_loss_2 = converter.encode(labels_2, batch_max_length=opt.batch_max_length)
start_time = time.time()
if image_input_tensors.shape[0] == 0:
continue
images_recon_1, images_recon_2, style = synthModel(image_input_tensors, text_for_loss_1, text_for_loss_2)
# #Save random reconstructed image and write its gt
# rIdx = random.randint(0,batch_size-1)
# try:
# save_image(tensor2im(image[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_input_'+labels_gt[rIdx]+'.png'))
# save_image(tensor2im(images_recon_1[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_recon_'+labels_1[rIdx]+'.png'))
# save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_pair_'+labels_2[rIdx]+'.png'))
# except:
# print('Warning while saving validation image')
if 'CTC' in opt.Prediction:
# if not opt.ocrFixed:
#ocr evaluations with orig image
preds_ocr = ocrModel(image_input_tensors, text_for_pred)
preds_size_ocr = torch.IntTensor([preds_ocr.size(1)] * batch_size)
ocrCost_ocr = ocrCriterion(preds_ocr.log_softmax(2).permute(1, 0, 2), text_for_loss_ocr, preds_size_ocr, length_for_loss_ocr)
_, preds_index_ocr = preds_ocr.max(2)
preds_str_ocr = converter.decode(preds_index_ocr.data, preds_size_ocr.data)
#content loss for reconstructed images
# permute 'preds' to use CTCloss format
preds_1 = ocrModel(images_recon_1, text_for_pred)
preds_size_1 = torch.IntTensor([preds_1.size(1)] * batch_size)
ocrCost_1 = ocrCriterion(preds_1.log_softmax(2).permute(1, 0, 2), text_for_loss_1, preds_size_1, length_for_loss_1)
_, preds_index_1 = preds_1.max(2)
preds_str_1 = converter.decode(preds_index_1.data, preds_size_1.data)
preds_2 = ocrModel(images_recon_2, text_for_pred)
preds_size_2 = torch.IntTensor([preds_2.size(1)] * batch_size)
ocrCost_2 = ocrCriterion(preds_2.log_softmax(2).permute(1, 0, 2), text_for_loss_2, preds_size_2, length_for_loss_2)
_, preds_index_2 = preds_2.max(2)
preds_str_2 = converter.decode(preds_index_2.data, preds_size_2.data)
else:
# if not opt.ocrFixed:
#ocr evaluations with orig image
preds_ocr = ocrModel(image_input_tensors, text_for_pred, is_train=False)
preds_ocr = preds_ocr[:, :text_for_loss_ocr.shape[1] - 1, :]
target_ocr = text_for_loss_ocr[:, 1:] # without [GO] Symbol
ocrCost_ocr = ocrCriterion(preds_ocr.contiguous().view(-1, preds_ocr.shape[-1]), target_ocr.contiguous().view(-1))
_, preds_index = preds_ocr.max(2)
preds_str_ocr = converter.decode(preds_index, length_for_pred)
# labels_1 = converter.decode(text_for_loss_1[:, 1:], length_for_loss_1)
# else:
# ocrCost_ocr = torch.tensor(0.0)
#ocr evaluations with orig image
preds_1 = ocrModel(images_recon_1, text_for_pred, is_train=False)
preds_1 = preds_1[:, :text_for_loss_1.shape[1] - 1, :]
target_1 = text_for_loss_1[:, 1:] # without [GO] Symbol
ocrCost_1 = ocrCriterion(preds_1.contiguous().view(-1, preds_1.shape[-1]), target_1.contiguous().view(-1))
_, preds_index_1 = preds_1.max(2)
preds_str_1 = converter.decode(preds_index_1, length_for_pred)
preds_2 = ocrModel(images_recon_2, text_for_pred, is_train=False)
preds_2 = preds_2[:, :text_for_loss_2.shape[1] - 1, :]
target_2 = text_for_loss_2[:, 1:] # without [GO] Symbol
ocrCost_2 = ocrCriterion(preds_2.contiguous().view(-1, preds_2.shape[-1]), target_2.contiguous().view(-1))
_, preds_index_2 = preds_2.max(2)
preds_str_2 = converter.decode(preds_index_2, length_for_pred)
forward_time = time.time() - start_time
if disModel == None:
disCost = torch.tensor(0.0)
disGenCost = torch.tensor(0.0)
else:
if opt.gan_type == 'wgan':
disCost = torch.tensor(0.0)
else:
disCost = 0.5*(disModel.module.calc_dis_loss(images_recon_1.detach(), image_input_tensors) + disModel.module.calc_dis_loss(images_recon_2.detach(), image_gt_tensors))
disGenCost = 0.5*(disModel.module.calc_gen_loss(images_recon_1)+disModel.module.calc_gen_loss(images_recon_2))
if opt.imgReconLoss == 'ssim':
recCost = -1*recCriterion(images_recon_1,image, val_range=2)
elif opt.imgReconLoss == 'ms-ssim':
recCost = -1*recCriterion(images_recon_1,image, val_range=2, normalize='relu')
else:
recCost = 0.5*(recCriterion(images_recon_1,image_input_tensors)+recCriterion(images_recon_2,image_gt_tensors))
if opt.styleReconWeight == 0.0:
styleRecCost = torch.tensor(0.0)
else:
styleRecCost = 0.33*(styleRecCriterion(synthModel(image_gt_tensors, None, None, styleFlag=True), style) + \
styleRecCriterion(synthModel(images_recon_1, None, None, styleFlag=True), style) + \
styleRecCriterion(synthModel(images_recon_2, None, None, styleFlag=True), style))
infer_time += forward_time
valid_loss_avg_ocr.add(ocrCost_ocr)
valid_loss_avg.add(opt.ocrWeight*(0.5*(ocrCost_1+ocrCost_2))+opt.reconWeight*recCost+opt.disWeight*disGenCost+opt.styleReconWeight*styleRecCost)
valid_loss_avg_dis.add(opt.disWeight*disCost)
#fine grained losses
valid_loss_avg_ocrRecon_1.add(opt.ocrWeight*(0.5*(ocrCost_1)))
valid_loss_avg_ocrRecon_2.add(opt.ocrWeight*(0.5*(ocrCost_2)))
valid_loss_avg_gen.add(opt.disWeight*disGenCost)
valid_loss_avg_imgRecon.add(opt.reconWeight*recCost)
valid_loss_avg_styRecon.add(opt.styleReconWeight*styleRecCost)
# if not opt.ocrFixed:
# calculate accuracy & confidence score
preds_prob_ocr = F.softmax(preds_ocr, dim=2)
preds_max_prob_ocr, _ = preds_prob_ocr.max(dim=2)
preds_prob_1 = F.softmax(preds_1, dim=2)
preds_max_prob_1, _ = preds_prob_1.max(dim=2)
preds_prob_2 = F.softmax(preds_2, dim=2)
preds_max_prob_2, _ = preds_prob_2.max(dim=2)
confidence_score_list_ocr = []
confidence_score_list_1 = []
confidence_score_list_2 = []
# zCntr=0
for gt_ocr, pred_ocr, pred_max_prob_ocr, gt_1, pred_1, pred_max_prob_1, gt_2, pred_2, pred_max_prob_2 in zip(labels_gt, preds_str_ocr, preds_max_prob_ocr, labels_1, preds_str_1, preds_max_prob_1, labels_2, preds_str_2, preds_max_prob_2):
if 'Attn' in opt.Prediction:
# if not opt.ocrFixed:
# gt_ocr = gt_ocr[:gt_ocr.find('[s]')]
pred_EOS = pred_ocr.find('[s]')
pred_ocr = pred_ocr[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob_ocr = pred_max_prob_ocr[:pred_EOS]
# gt_1 = gt_1[:gt_1.find('[s]')]
pred_EOS = pred_1.find('[s]')
pred_1 = pred_1[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob_1 = pred_max_prob_1[:pred_EOS]
# gt_2 = gt_2[:gt_2.find('[s]')]
pred_EOS = pred_2.find('[s]')
pred_2 = pred_2[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob_2 = pred_max_prob_2[:pred_EOS]
# # To evaluate 'case sensitive model' with alphanumeric and case insensitve setting.
# if opt.sensitive and opt.data_filtering_off:
# pred = pred.lower()
# gt = gt.lower()
# alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
# out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
# pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
# gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
if pred_ocr == gt_ocr:
n_correct_ocr += 1
# else:
# n_correct_ocr=0
if pred_1 == gt_1:
n_correct_1 += 1
if pred_2 == gt_2:
n_correct_2 += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt_1) == 0 or len(pred_1) == 0:
norm_ED_1 += 0
elif len(gt_1) > len(pred_1):
norm_ED_1 += 1 - edit_distance(pred_1, gt_1) / len(gt_1)
else:
norm_ED_1 += 1 - edit_distance(pred_1, gt_1) / len(pred_1)
# ICDAR2019 Normalized Edit Distance
if len(gt_2) == 0 or len(pred_2) == 0:
norm_ED_2 += 0
elif len(gt_2) > len(pred_2):
norm_ED_2 += 1 - edit_distance(pred_2, gt_2) / len(gt_2)
else:
norm_ED_2 += 1 - edit_distance(pred_2, gt_2) / len(pred_2)
# if not opt.ocrFixed:
# ICDAR2019 Normalized Edit Distance
if len(gt_ocr) == 0 or len(pred_ocr) == 0:
norm_ED_ocr += 0
elif len(gt_ocr) > len(pred_ocr):
norm_ED_ocr += 1 - edit_distance(pred_ocr, gt_ocr) / len(gt_ocr)
else:
norm_ED_ocr += 1 - edit_distance(pred_ocr, gt_ocr) / len(pred_ocr)
# else:
# norm_ED_ocr=0
# calculate confidence score (= multiply of pred_max_prob)
try:
# if not opt.ocrFixed:
confidence_score_ocr = pred_max_prob_ocr.cumprod(dim=0)[-1]
# else:
# confidence_score_ocr = 1.0
confidence_score_1 = pred_max_prob_1.cumprod(dim=0)[-1]
confidence_score_2 = pred_max_prob_2.cumprod(dim=0)[-1]
except:
confidence_score_ocr = 0
confidence_score_1 = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_2 = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list_ocr.append(confidence_score_ocr)
confidence_score_list_1.append(confidence_score_1)
confidence_score_list_2.append(confidence_score_2)
# print(pred, gt, pred==gt, confidence_score)
# zCntr+=1
#Save random reconstructed image and write its gt
if opt.testFlag:
randomSaveIdx = list(range(0,batch_size))
else:
randomSaveIdx = [random.randint(0,batch_size-1)]
for rIdx in randomSaveIdx:
if 'Attn' in opt.Prediction:
r_pred_EOS = preds_str_ocr[rIdx].find('[s]')
r_pred_ocr = preds_str_ocr[rIdx][:r_pred_EOS]
r_pred_1_EOS = preds_str_1[rIdx].find('[s]')
r_pred_1 = preds_str_1[rIdx][:r_pred_1_EOS]
r_pred_2_EOS = preds_str_2[rIdx].find('[s]')
r_pred_2 = preds_str_2[rIdx][:r_pred_2_EOS]
else:
r_pred_ocr = preds_str_ocr[rIdx]
r_pred_1 = preds_str_1[rIdx]
r_pred_2 = preds_str_2[rIdx]
try:
save_image(tensor2im(image_input_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_sInput_'+labels_gt[rIdx]+'_'+r_pred_ocr+'.png'))
save_image(tensor2im(image_gt_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csInput_'+labels_2[rIdx]+'_'+'xxx'+'.png'))
save_image(tensor2im(images_recon_1[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_sRecon_'+labels_1[rIdx]+'_'+r_pred_1+'.png'))
save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csRecon_'+labels_2[rIdx]+'_'+r_pred_2+'.png'))
except:
print('Warning while saving validation image')
accuracy_ocr = n_correct_ocr / float(length_of_data) * 100
norm_ED_ocr = norm_ED_ocr / float(length_of_data) # ICDAR2019 Normalized Edit Distance
accuracy_1 = n_correct_1 / float(length_of_data) * 100
norm_ED_1 = norm_ED_1 / float(length_of_data) # ICDAR2019 Normalized Edit Distance
accuracy_2 = n_correct_2 / float(length_of_data) * 100
norm_ED_2 = norm_ED_2 / float(length_of_data) # ICDAR2019 Normalized Edit Distance
random.seed()
return [valid_loss_avg_ocr.val(), valid_loss_avg.val(), valid_loss_avg_dis.val(), valid_loss_avg_ocrRecon_1.val(),valid_loss_avg_ocrRecon_2.val(), valid_loss_avg_gen.val(), valid_loss_avg_imgRecon.val(), valid_loss_avg_styRecon.val()], [accuracy_ocr,accuracy_1,accuracy_2], [norm_ED_ocr,norm_ED_1,norm_ED_2], [preds_str_ocr, preds_str_1,preds_str_2], [confidence_score_list_ocr,confidence_score_list_1,confidence_score_list_2], [labels_gt,labels_1,labels_2], infer_time, length_of_data
def validation_synth_v3(iterCntr, styleModel, genModel, vggModel, disModel, recCriterion, evaluation_loader, converter, opt):
""" validation or evaluation """
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr)), exist_ok=True)
random.seed(1024)
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
valid_loss_avg_dis = Averager()
valid_loss_avg_gen = Averager()
valid_loss_avg_imgRecon = Averager()
valid_loss_avg_vgg_per = Averager()
valid_loss_avg_vgg_sty = Averager()
lexicons=[]
out_of_char = f'[^{opt.character}]'
# #read lexicons file
# with open(opt.lexFile,'r') as lexF:
# for line in lexF:
# lexWord = line[:-1]
# if opt.fixedString and len(lexWord)!=opt.batch_exact_length:
# continue
# if len(lexWord) <= opt.batch_max_length and not(re.search(out_of_char, lexWord.lower())) and len(lexWord) >= opt.batch_min_length:
# lexicons.append(lexWord)
for i, (image_input_tensors, image_gt_tensors, labels_1, labels_2) in enumerate(evaluation_loader):
if opt.debugFlag and i>0:
break
# disCnt = int(image_tensors_all.size(0)/2)
labels_gt = labels_1
image_input_tensors = image_input_tensors.to(device)
image_gt_tensors = image_gt_tensors.to(device)
batch_size = image_input_tensors.size(0)
length_of_data = length_of_data + batch_size
# For max length prediction
# length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
# text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss_2, length_for_loss_2 = converter.encode(labels_2, batch_max_length=opt.batch_max_length)
start_time = time.time()
if image_input_tensors.shape[0] == 0:
continue
style = styleModel(image_input_tensors)
images_recon_2 = genModel(style, text_for_loss_2)
forward_time = time.time() - start_time
if disModel == None:
disCost = torch.tensor(0.0)
disGenCost = torch.tensor(0.0)
else:
if opt.gan_type == 'wgan':
disCost = torch.tensor(0.0)
else:
disCost = disModel.module.calc_dis_loss(torch.cat((images_recon_2.detach(),image_input_tensors),dim=1), torch.cat((image_gt_tensors,image_input_tensors),dim=1))
disGenCost = disModel.module.calc_gen_loss(torch.cat((images_recon_2,image_input_tensors),dim=1))
if opt.imgReconLoss == 'ssim':
recCost = -1*recCriterion(images_recon_1,image, val_range=2)
elif opt.imgReconLoss == 'ms-ssim':
recCost = -1*recCriterion(images_recon_1,image, val_range=2, normalize='relu')
else:
recCost = recCriterion(images_recon_2,image_gt_tensors)
vggPerCost, vggStyleCost = vggModel(image_gt_tensors, images_recon_2)
infer_time += forward_time
valid_loss_avg.add(opt.reconWeight*recCost + opt.disWeight*disGenCost + opt.vggPerWeight*vggPerCost + opt.vggStyWeight*vggStyleCost)
valid_loss_avg_dis.add(opt.disWeight*disCost)
#fine grained losses
valid_loss_avg_gen.add(opt.disWeight*disGenCost)
valid_loss_avg_imgRecon.add(opt.reconWeight*recCost)
valid_loss_avg_vgg_per.add(opt.vggPerWeight*vggPerCost)
valid_loss_avg_vgg_sty.add(opt.vggStyWeight*vggStyleCost)
#Save random reconstructed image and write its gt
if opt.testFlag:
randomSaveIdx = list(range(0,batch_size))
else:
randomSaveIdx = [random.randint(0,batch_size-1)]
for rIdx in randomSaveIdx:
try:
save_image(tensor2im(image_input_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_sInput_'+labels_gt[rIdx]+'.png'))
save_image(tensor2im(image_gt_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csInput_'+labels_2[rIdx]+'.png'))
save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csRecon_'+labels_2[rIdx]+'.png'))
except:
print('Warning while saving validation image')
random.seed()
return [valid_loss_avg.val(), valid_loss_avg_dis.val(), valid_loss_avg_gen.val(), valid_loss_avg_imgRecon.val(), valid_loss_avg_vgg_per.val(), valid_loss_avg_vgg_sty.val()], infer_time, length_of_data
def validation_synth_v4(iterCntr, styleModel, genModel, vggModel, ocrModel, disModel, recCriterion, ocrCriterion, evaluation_loader, converter, opt):
""" validation or evaluation """
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr)), exist_ok=True)
random.seed(1024)
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
valid_loss_avg_dis = Averager()
valid_loss_avg_gen = Averager()
valid_loss_avg_imgRecon = Averager()
valid_loss_avg_vgg_per = Averager()
valid_loss_avg_vgg_sty = Averager()
valid_loss_avg_ocr = Averager()
lexicons=[]
out_of_char = f'[^{opt.character}]'
# #read lexicons file
# with open(opt.lexFile,'r') as lexF:
# for line in lexF:
# lexWord = line[:-1]
# if opt.fixedString and len(lexWord)!=opt.batch_exact_length:
# continue
# if len(lexWord) <= opt.batch_max_length and not(re.search(out_of_char, lexWord.lower())) and len(lexWord) >= opt.batch_min_length:
# lexicons.append(lexWord)
for i, (image_input_tensors, image_gt_tensors, labels_1, labels_2) in enumerate(evaluation_loader):
if opt.debugFlag and i>0:
break
# disCnt = int(image_tensors_all.size(0)/2)
labels_gt = labels_1
image_input_tensors = image_input_tensors.to(device)
image_gt_tensors = image_gt_tensors.to(device)
batch_size = image_input_tensors.size(0)
length_of_data = length_of_data + batch_size
# For max length prediction
# length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
# text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss_2, length_for_loss_2 = converter.encode(labels_2, batch_max_length=opt.batch_max_length)
start_time = time.time()
if image_input_tensors.shape[0] == 0:
continue
style = styleModel(image_input_tensors)
images_recon_2 = genModel(style, text_for_loss_2)
forward_time = time.time() - start_time
if disModel == None:
disCost = torch.tensor(0.0)
disGenCost = torch.tensor(0.0)
else:
if opt.gan_type == 'wgan':
disCost = torch.tensor(0.0)
else:
disCost = disModel.module.calc_dis_loss(torch.cat((images_recon_2.detach(),image_input_tensors),dim=1), torch.cat((image_gt_tensors,image_input_tensors),dim=1))
disGenCost = disModel.module.calc_gen_loss(torch.cat((images_recon_2,image_input_tensors),dim=1))
if opt.imgReconLoss == 'ssim':
recCost = -1*recCriterion(images_recon_1,image, val_range=2)
elif opt.imgReconLoss == 'ms-ssim':
recCost = -1*recCriterion(images_recon_1,image, val_range=2, normalize='relu')
else:
recCost = recCriterion(images_recon_2,image_gt_tensors)
vggPerCost, vggStyleCost = vggModel(image_gt_tensors, images_recon_2)
#ocr loss
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
if opt.contentLoss == 'vis' or opt.contentLoss == 'seq':
preds_recon = ocrModel(images_recon_2, text_for_pred, is_train=False, returnFeat=opt.contentLoss)
preds_gt = ocrModel(image_gt_tensors, text_for_pred, is_train=False, returnFeat=opt.contentLoss)
ocrCost = ocrCriterion(preds_recon, preds_gt)
else:
if 'CTC' in opt.Prediction:
preds_recon = ocrModel(images_recon_2, text_for_pred, is_train=False)
preds_size = torch.IntTensor([preds_recon.size(1)] * batch_size)
preds_recon = preds_recon.log_softmax(2).permute(1, 0, 2)
ocrCost = ocrCriterion(preds_recon, text_for_loss_2, preds_size, length_2)
else:
preds_recon = ocrModel(images_recon_2, text_for_pred[:, :-1], is_train=False) # align with Attention.forward
target_2 = text_for_loss_2[:, 1:] # without [GO] Symbol
ocrCost = ocrCriterion(preds_recon.view(-1, preds_recon.shape[-1]), target_2.contiguous().view(-1))
infer_time += forward_time
valid_loss_avg.add(opt.reconWeight*recCost + opt.disWeight*disGenCost + opt.vggPerWeight*vggPerCost + opt.vggStyWeight*vggStyleCost + opt.ocrWeight*ocrCost)
valid_loss_avg_dis.add(opt.disWeight*disCost)
#fine grained losses
valid_loss_avg_gen.add(opt.disWeight*disGenCost)
valid_loss_avg_imgRecon.add(opt.reconWeight*recCost)
valid_loss_avg_vgg_per.add(opt.vggPerWeight*vggPerCost)
valid_loss_avg_vgg_sty.add(opt.vggStyWeight*vggStyleCost)
valid_loss_avg_ocr.add(opt.ocrWeight*ocrCost)
#Save random reconstructed image and write its gt
if opt.testFlag:
randomSaveIdx = list(range(0,batch_size))
else:
randomSaveIdx = [random.randint(0,batch_size-1)]
for rIdx in randomSaveIdx:
try:
save_image(tensor2im(image_input_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_sInput_'+labels_gt[rIdx]+'.png'))
save_image(tensor2im(image_gt_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csInput_'+labels_2[rIdx]+'.png'))
save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csRecon_'+labels_2[rIdx]+'.png'))
except:
print('Warning while saving validation image')
random.seed()
return [valid_loss_avg.val(), valid_loss_avg_dis.val(), valid_loss_avg_gen.val(), valid_loss_avg_imgRecon.val(), valid_loss_avg_vgg_per.val(), valid_loss_avg_vgg_sty.val(), valid_loss_avg_ocr.val()], infer_time, length_of_data
def validation_synth_v5(iterCntr, styleModel, genModel, mixModel, vggModel, ocrModel, disModel, recCriterion, ocrCriterion, evaluation_loader, converter, opt):
""" validation or evaluation """
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr)), exist_ok=True)
random.seed(1024)
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
valid_loss_avg_dis = Averager()
valid_loss_avg_gen = Averager()
valid_loss_avg_imgRecon = Averager()
valid_loss_avg_vgg_per = Averager()
valid_loss_avg_vgg_sty = Averager()
valid_loss_avg_ocr = Averager()
lexicons=[]
out_of_char = f'[^{opt.character}]'
# #read lexicons file
# with open(opt.lexFile,'r') as lexF:
# for line in lexF:
# lexWord = line[:-1]
# if opt.fixedString and len(lexWord)!=opt.batch_exact_length:
# continue
# if len(lexWord) <= opt.batch_max_length and not(re.search(out_of_char, lexWord.lower())) and len(lexWord) >= opt.batch_min_length:
# lexicons.append(lexWord)
for i, (image_input_tensors, image_gt_tensors, labels_1, labels_2) in enumerate(evaluation_loader):
if opt.debugFlag and i>5:
break
# disCnt = int(image_tensors_all.size(0)/2)
labels_gt = labels_1
image_input_tensors = image_input_tensors.to(device)
image_gt_tensors = image_gt_tensors.to(device)
batch_size = image_input_tensors.size(0)
length_of_data = length_of_data + batch_size
# For max length prediction
# length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
# text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss_2, length_for_loss_2 = converter.encode(labels_2, batch_max_length=opt.batch_max_length)
start_time = time.time()
if image_input_tensors.shape[0] == 0:
continue
style = styleModel(image_input_tensors).squeeze(2).squeeze(2)
scInput = mixModel(style,text_for_loss_2)
images_recon_2, _ = genModel([scInput], input_is_latent=opt.input_latent)
forward_time = time.time() - start_time
if disModel == None:
disCost = torch.tensor(0.0)
disGenCost = torch.tensor(0.0)
else:
if opt.gan_type == 'wgan':
disCost = torch.tensor(0.0)
else:
# disCost = disModel.module.calc_dis_loss(torch.cat((images_recon_2.detach(),image_input_tensors),dim=1), torch.cat((image_gt_tensors,image_input_tensors),dim=1))
fake_pred = disModel(torch.cat((images_recon_2,image_input_tensors),dim=1))
real_pred = disModel(torch.cat((image_gt_tensors,image_input_tensors),dim=1))
disCost = d_logistic_loss(real_pred, fake_pred)
# disGenCost = disModel.module.calc_gen_loss(torch.cat((images_recon_2,image_input_tensors),dim=1))
fake_pred = disModel(torch.cat((images_recon_2,image_input_tensors),dim=1))
disGenCost = g_nonsaturating_loss(fake_pred)
if opt.imgReconLoss == 'ssim':
recCost = -1*recCriterion(images_recon_1,image, val_range=2)
elif opt.imgReconLoss == 'ms-ssim':
recCost = -1*recCriterion(images_recon_1,image, val_range=2, normalize='relu')
else:
recCost = recCriterion(images_recon_2,image_gt_tensors)
vggPerCost, vggStyleCost = vggModel(image_gt_tensors, images_recon_2)
#ocr loss
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
if opt.contentLoss == 'vis' or opt.contentLoss == 'seq':
preds_recon = ocrModel(images_recon_2, text_for_pred, is_train=False, returnFeat=opt.contentLoss)
preds_gt = ocrModel(image_gt_tensors, text_for_pred, is_train=False, returnFeat=opt.contentLoss)
ocrCost = ocrCriterion(preds_recon, preds_gt)
else:
if 'CTC' in opt.Prediction:
preds_recon = ocrModel(images_recon_2, text_for_pred, is_train=False)
preds_size = torch.IntTensor([preds_recon.size(1)] * batch_size)
preds_recon = preds_recon.log_softmax(2).permute(1, 0, 2)
ocrCost = ocrCriterion(preds_recon, text_for_loss_2, preds_size, length_2)
else:
preds_recon = ocrModel(images_recon_2, text_for_pred[:, :-1], is_train=False) # align with Attention.forward
target_2 = text_for_loss_2[:, 1:] # without [GO] Symbol
ocrCost = ocrCriterion(preds_recon.view(-1, preds_recon.shape[-1]), target_2.contiguous().view(-1))
infer_time += forward_time
valid_loss_avg.add(opt.reconWeight*recCost + opt.disWeight*disGenCost + opt.vggPerWeight*vggPerCost + opt.vggStyWeight*vggStyleCost + opt.ocrWeight*ocrCost)
valid_loss_avg_dis.add(opt.disWeight*disCost)
#fine grained losses
valid_loss_avg_gen.add(opt.disWeight*disGenCost)
valid_loss_avg_imgRecon.add(opt.reconWeight*recCost)
valid_loss_avg_vgg_per.add(opt.vggPerWeight*vggPerCost)
valid_loss_avg_vgg_sty.add(opt.vggStyWeight*vggStyleCost)
valid_loss_avg_ocr.add(opt.ocrWeight*ocrCost)
#Save random reconstructed image and write its gt
if opt.testFlag:
randomSaveIdx = list(range(0,batch_size))
else:
randomSaveIdx = [random.randint(0,batch_size-1)]
for rIdx in randomSaveIdx:
try:
save_image(tensor2im(image_input_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_sInput_'+labels_gt[rIdx]+'.png'))
save_image(tensor2im(image_gt_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csInput_'+labels_2[rIdx]+'.png'))
save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csRecon_'+labels_2[rIdx]+'.png'))
except:
print('Warning while saving validation image')
random.seed()
return [valid_loss_avg.val(), valid_loss_avg_dis.val(), valid_loss_avg_gen.val(), valid_loss_avg_imgRecon.val(), valid_loss_avg_vgg_per.val(), valid_loss_avg_vgg_sty.val(), valid_loss_avg_ocr.val()], infer_time, length_of_data
def validation_synth_v6(iterCntr, genModel, ocrModel, disModel, ocrCriterion, evaluation_loader, converter, opt):
""" validation or evaluation """
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr)), exist_ok=True)
random.seed(1024)
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
valid_loss_avg_dis = Averager()
valid_loss_avg_gen = Averager()
valid_loss_avg_imgRecon = Averager()
valid_loss_avg_vgg_per = Averager()
valid_loss_avg_vgg_sty = Averager()
valid_loss_avg_ocr = Averager()
lexicons=[]
out_of_char = f'[^{opt.character}]'
# #read lexicons file
# with open(opt.lexFile,'r') as lexF:
# for line in lexF:
# lexWord = line[:-1]
# if opt.fixedString and len(lexWord)!=opt.batch_exact_length:
# continue
# if len(lexWord) <= opt.batch_max_length and not(re.search(out_of_char, lexWord.lower())) and len(lexWord) >= opt.batch_min_length:
# lexicons.append(lexWord)
for i, (image_input_tensors, image_gt_tensors, labels_1, labels_2) in enumerate(evaluation_loader):
if opt.debugFlag and i>5:
break
# disCnt = int(image_tensors_all.size(0)/2)
labels_gt = labels_1
image_input_tensors = image_input_tensors.to(device)
image_gt_tensors = image_gt_tensors.to(device)
batch_size = image_input_tensors.size(0)
length_of_data = length_of_data + batch_size
# For max length prediction
# length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
# text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss_2, length_for_loss_2 = converter.encode(labels_2, batch_max_length=opt.batch_max_length)
start_time = time.time()
if image_input_tensors.shape[0] == 0:
continue
# style = styleModel(image_input_tensors).squeeze(2).squeeze(2)
# scInput = mixModel(style,text_for_loss_2)
style = mixing_noise(batch_size, opt.latent, opt.mixing, device)
# images_recon_2, _ = genModel(style, text_for_loss_2, input_is_latent=opt.input_latent)
if 'CTC' in opt.Prediction:
images_recon_2, _ = genModel(style, text_for_loss_2, input_is_latent=opt.input_latent)
else:
images_recon_2, _ = genModel(style, text_for_loss_2[:,1:-1], input_is_latent=opt.input_latent)
forward_time = time.time() - start_time
if disModel == None:
disCost = torch.tensor(0.0)
disGenCost = torch.tensor(0.0)
else:
if opt.gan_type == 'wgan':
disCost = torch.tensor(0.0)
else:
# disCost = disModel.module.calc_dis_loss(torch.cat((images_recon_2.detach(),image_input_tensors),dim=1), torch.cat((image_gt_tensors,image_input_tensors),dim=1))
# fake_pred = disModel(torch.cat((images_recon_2,image_input_tensors),dim=1))
# real_pred = disModel(torch.cat((image_gt_tensors,image_input_tensors),dim=1))
fake_pred = disModel(images_recon_2)
real_pred = disModel(image_gt_tensors)
disCost = d_logistic_loss(real_pred, fake_pred)
# disGenCost = disModel.module.calc_gen_loss(torch.cat((images_recon_2,image_input_tensors),dim=1))
# fake_pred = disModel(torch.cat((images_recon_2,image_input_tensors),dim=1))
fake_pred = disModel(images_recon_2)
disGenCost = g_nonsaturating_loss(fake_pred)
# if opt.imgReconLoss == 'ssim':
# recCost = -1*recCriterion(images_recon_1,image, val_range=2)
# elif opt.imgReconLoss == 'ms-ssim':
# recCost = -1*recCriterion(images_recon_1,image, val_range=2, normalize='relu')
# else:
# recCost = recCriterion(images_recon_2,image_gt_tensors)
# vggPerCost, vggStyleCost = vggModel(image_gt_tensors, images_recon_2)
#ocr loss
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
if opt.contentLoss == 'vis' or opt.contentLoss == 'seq':
preds_recon = ocrModel(images_recon_2, text_for_pred, is_train=False, returnFeat=opt.contentLoss)
preds_gt = ocrModel(image_gt_tensors, text_for_pred, is_train=False, returnFeat=opt.contentLoss)
ocrCost = ocrCriterion(preds_recon, preds_gt)
else:
if 'CTC' in opt.Prediction:
preds_recon = ocrModel(images_recon_2, text_for_pred, is_train=False)
preds_size = torch.IntTensor([preds_recon.size(1)] * batch_size)
preds_recon = preds_recon.log_softmax(2).permute(1, 0, 2)
ocrCost = ocrCriterion(preds_recon, text_for_loss_2, preds_size, length_for_loss_2)
else:
preds_recon = ocrModel(images_recon_2, text_for_pred[:, :-1], is_train=False) # align with Attention.forward
target_2 = text_for_loss_2[:, 1:] # without [GO] Symbol
ocrCost = ocrCriterion(preds_recon.view(-1, preds_recon.shape[-1]), target_2.contiguous().view(-1))
infer_time += forward_time
# valid_loss_avg.add(opt.reconWeight*recCost + opt.disWeight*disGenCost + opt.vggPerWeight*vggPerCost + opt.vggStyWeight*vggStyleCost + opt.ocrWeight*ocrCost)
valid_loss_avg.add(opt.disWeight*disGenCost + opt.ocrWeight*ocrCost)
valid_loss_avg_dis.add(opt.disWeight*disCost)
#fine grained losses
valid_loss_avg_gen.add(opt.disWeight*disGenCost)
valid_loss_avg_imgRecon.add(torch.tensor(0.0))
valid_loss_avg_vgg_per.add(torch.tensor(0.0))
valid_loss_avg_vgg_sty.add(torch.tensor(0.0))
valid_loss_avg_ocr.add(opt.ocrWeight*ocrCost)
#Save random reconstructed image and write its gt
if opt.testFlag:
randomSaveIdx = list(range(0,batch_size))
else:
randomSaveIdx = [random.randint(0,batch_size-1)]
for rIdx in randomSaveIdx:
try:
save_image(tensor2im(image_input_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_sInput_'+labels_gt[rIdx]+'.png'))
save_image(tensor2im(image_gt_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csInput_'+labels_2[rIdx]+'.png'))
save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csRecon_'+labels_2[rIdx]+'.png'))
except:
print('Warning while saving validation image')
random.seed()
return [valid_loss_avg.val(), valid_loss_avg_dis.val(), valid_loss_avg_gen.val(), valid_loss_avg_imgRecon.val(), valid_loss_avg_vgg_per.val(), valid_loss_avg_vgg_sty.val(), valid_loss_avg_ocr.val()], infer_time, length_of_data
def validation_synth_v7(iterCntr, genModel, ocrModel, disModel, ocrCriterion, evaluation_loader, converter, opt):
""" validation or evaluation """
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr)), exist_ok=True)
random.seed(1024)
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
valid_loss_avg_dis = Averager()
valid_loss_avg_gen = Averager()
valid_loss_avg_imgRecon = Averager()
valid_loss_avg_vgg_per = Averager()
valid_loss_avg_vgg_sty = Averager()
valid_loss_avg_ocr = Averager()
lexicons=[]
out_of_char = f'[^{opt.character}]'
# #read lexicons file
# with open(opt.lexFile,'r') as lexF:
# for line in lexF:
# lexWord = line[:-1]
# if opt.fixedString and len(lexWord)!=opt.batch_exact_length:
# continue
# if len(lexWord) <= opt.batch_max_length and not(re.search(out_of_char, lexWord.lower())) and len(lexWord) >= opt.batch_min_length:
# lexicons.append(lexWord)
for i, (image_input_tensors, image_gt_tensors, labels_1, labels_2, synthimg_labels_2_tensors) in enumerate(evaluation_loader):
if opt.debugFlag and i>5:
break
# disCnt = int(image_tensors_all.size(0)/2)
labels_gt = labels_1
image_input_tensors = image_input_tensors.to(device)
image_gt_tensors = image_gt_tensors.to(device)
synthimg_labels_2_tensors = synthimg_labels_2_tensors.to(device)
batch_size = image_input_tensors.size(0)
length_of_data = length_of_data + batch_size
# For max length prediction
# length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
# text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss_2, length_for_loss_2 = converter.encode(labels_2, batch_max_length=opt.batch_max_length)
start_time = time.time()
if image_input_tensors.shape[0] == 0:
continue
# style = styleModel(image_input_tensors).squeeze(2).squeeze(2)
# scInput = mixModel(style,text_for_loss_2)
style = mixing_noise(batch_size, opt.latent, opt.mixing, device)
# images_recon_2, _ = genModel(style, text_for_loss_2, input_is_latent=opt.input_latent)
images_recon_2, _ = genModel(style, synthimg_labels_2_tensors, input_is_latent=opt.input_latent)
forward_time = time.time() - start_time
if disModel == None:
disCost = torch.tensor(0.0)
disGenCost = torch.tensor(0.0)
else:
if opt.gan_type == 'wgan':
disCost = torch.tensor(0.0)
else:
# disCost = disModel.module.calc_dis_loss(torch.cat((images_recon_2.detach(),image_input_tensors),dim=1), torch.cat((image_gt_tensors,image_input_tensors),dim=1))
# fake_pred = disModel(torch.cat((images_recon_2,image_input_tensors),dim=1))
# real_pred = disModel(torch.cat((image_gt_tensors,image_input_tensors),dim=1))
fake_pred = disModel(images_recon_2)
real_pred = disModel(image_gt_tensors)
disCost = d_logistic_loss(real_pred, fake_pred)
# disGenCost = disModel.module.calc_gen_loss(torch.cat((images_recon_2,image_input_tensors),dim=1))
# fake_pred = disModel(torch.cat((images_recon_2,image_input_tensors),dim=1))
fake_pred = disModel(images_recon_2)
disGenCost = g_nonsaturating_loss(fake_pred)
# if opt.imgReconLoss == 'ssim':
# recCost = -1*recCriterion(images_recon_1,image, val_range=2)
# elif opt.imgReconLoss == 'ms-ssim':
# recCost = -1*recCriterion(images_recon_1,image, val_range=2, normalize='relu')
# else:
# recCost = recCriterion(images_recon_2,image_gt_tensors)
# vggPerCost, vggStyleCost = vggModel(image_gt_tensors, images_recon_2)
#ocr loss
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
if opt.contentLoss == 'vis' or opt.contentLoss == 'seq':
preds_recon = ocrModel(images_recon_2, text_for_pred, is_train=False, returnFeat=opt.contentLoss)
preds_gt = ocrModel(image_gt_tensors, text_for_pred, is_train=False, returnFeat=opt.contentLoss)
ocrCost = ocrCriterion(preds_recon, preds_gt)
else:
if 'CTC' in opt.Prediction:
preds_recon = ocrModel(images_recon_2, text_for_pred, is_train=False)
preds_size = torch.IntTensor([preds_recon.size(1)] * batch_size)
preds_recon = preds_recon.log_softmax(2).permute(1, 0, 2)
ocrCost = ocrCriterion(preds_recon, text_for_loss_2, preds_size, length_for_loss_2)
else:
preds_recon = ocrModel(images_recon_2, text_for_pred[:, :-1], is_train=False) # align with Attention.forward
target_2 = text_for_loss_2[:, 1:] # without [GO] Symbol
ocrCost = ocrCriterion(preds_recon.view(-1, preds_recon.shape[-1]), target_2.contiguous().view(-1))
infer_time += forward_time
# valid_loss_avg.add(opt.reconWeight*recCost + opt.disWeight*disGenCost + opt.vggPerWeight*vggPerCost + opt.vggStyWeight*vggStyleCost + opt.ocrWeight*ocrCost)
valid_loss_avg.add(opt.disWeight*disGenCost + opt.ocrWeight*ocrCost)
valid_loss_avg_dis.add(opt.disWeight*disCost)
#fine grained losses
valid_loss_avg_gen.add(opt.disWeight*disGenCost)
valid_loss_avg_imgRecon.add(torch.tensor(0.0))
valid_loss_avg_vgg_per.add(torch.tensor(0.0))
valid_loss_avg_vgg_sty.add(torch.tensor(0.0))
valid_loss_avg_ocr.add(opt.ocrWeight*ocrCost)
#Save random reconstructed image and write its gt
if opt.testFlag:
randomSaveIdx = list(range(0,batch_size))
else:
randomSaveIdx = [random.randint(0,batch_size-1)]
for rIdx in randomSaveIdx:
try:
save_image(tensor2im(image_input_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_sInput_'+labels_gt[rIdx]+'.png'))
save_image(tensor2im(image_gt_tensors[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csInput_'+labels_2[rIdx]+'.png'))
save_image(tensor2im(images_recon_2[rIdx]),os.path.join(opt.exp_dir,opt.exp_name,'valImages',str(iterCntr),str(i)+'_'+str(rIdx)+'_'+'_csRecon_'+labels_2[rIdx]+'.png'))
except:
print('Warning while saving validation image')
random.seed()
return [valid_loss_avg.val(), valid_loss_avg_dis.val(), valid_loss_avg_gen.val(), valid_loss_avg_imgRecon.val(), valid_loss_avg_vgg_per.val(), valid_loss_avg_vgg_sty.val(), valid_loss_avg_ocr.val()], infer_time, length_of_data
def test(opt):
""" model configuration """
if 'CTC' in opt.Prediction:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
model = AdaINGen(opt)
ocrModel = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling, opt.Prediction)
model = torch.nn.DataParallel(model).to(device)
ocrModel = torch.nn.DataParallel(ocrModel).to(device)
# load model
print('loading pretrained ocr model from %s' % opt.saved_ocr_model)
ocrModel.load_state_dict(torch.load(opt.saved_ocr_model, map_location=device))
print('loading pretrained synth model from %s' % opt.saved_synth_model)
model.load_state_dict(torch.load(opt.saved_synth_model, map_location=device))
# opt.exp_name = '_'.join(opt.saved_model.split('/')[1:])
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name), exist_ok=True)
os.makedirs(os.path.join(opt.exp_dir,opt.exp_name,'evalImages'), exist_ok=True)
print(model)
print(ocrModel)
""" keep evaluation model and result logs """
# os.makedirs(f'./result/{opt.exp_name}', exist_ok=True)
# os.system(f'cp {opt.saved_model} ./result/{opt.exp_name}/')
""" setup loss """
if 'CTC' in opt.Prediction:
ocrCriterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
else:
ocrCriterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0
recCriterion = torch.nn.L1Loss()
styleRecCriterion = torch.nn.L1Loss()
""" evaluation """
model.eval()
ocrModel.eval()
with torch.no_grad():
if opt.benchmark_all_eval: # evaluation with 10 benchmark evaluation datasets
benchmark_all_eval(model, ocrModel, recCriterion, styleRecCriterion, ocrCriterion, converter, opt)
else:
# log = open(f'./result/{opt.exp_name}/log_evaluation.txt', 'a')
AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
eval_data, eval_data_log = hierarchical_dataset(root=opt.eval_data, opt=opt)
evaluation_loader = torch.utils.data.DataLoader(
eval_data, batch_size=opt.batch_size,
shuffle=False,
num_workers=int(opt.workers),
collate_fn=AlignCollate_evaluation, pin_memory=True)
validation_synth_lrw_res(-1,model, ocrModel, None, recCriterion, styleRecCriterion, ocrCriterion, evaluation_loader, converter, opt)
# log.write(eval_data_log)
# print(f'{accuracy_by_best_model:0.3f}')
# log.write(f'{accuracy_by_best_model:0.3f}\n')
# log.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp_dir', default='/checkpoint/pkrishnan/experiments/scribe/Exp06/', help='Where to store logs and models')
parser.add_argument('--exp_name', default='debug', help='Where to store logs and models')
parser.add_argument('--eval_data', required=True, help='path to evaluation dataset')
parser.add_argument('--benchmark_all_eval', action='store_true', help='evaluate 10 benchmark evaluation datasets')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--saved_ocr_model', default='', help="path to model to continue training")
parser.add_argument('--saved_synth_model', default='', help="path to model to continue training")
""" Data processing """
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--batch_min_length', type=int, default=1, help='minimum-label-length')
parser.add_argument('--fixedString', action='store_true', help='use fixed length data')
parser.add_argument('--batch_exact_length', type=int, default=5, help='exact-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--ocr_imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--ocr_imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
parser.add_argument('--lexFile', default='/checkpoint/pkrishnan/datasets/vocab/english-words.txt', help='unqiue words in language')
""" Model Architecture """
parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--Prediction', type=str, required=True, help='Prediction stage. CTC|Attn')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')
parser.add_argument('--ocr_input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
parser.add_argument('--char_embed_size', type=int, default=60, help='character embedding for content encoder')
parser.add_argument('--ocrFixed', action='store_true', help='true: for pretrined OCR and fixed weights')
parser.add_argument('--ocrWeight', type=float, default=1.0, help='weights for loss')
parser.add_argument('--reconWeight', type=float, default=1.0, help='weights for loss')
parser.add_argument('--disWeight', type=float, default=1.0, help='weights for loss')
parser.add_argument('--styleReconWeight', type=float, default=1.0, help='weights for loss')
parser.add_argument('--debugFlag', action='store_true', help='for debugging')
parser.add_argument('--testFlag', action='store_true', help='for testing')
opt = parser.parse_args()
""" vocab / character number configuration """
if opt.sensitive:
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
test(opt)
| 49.243421
| 489
| 0.644534
| 15,380
| 112,275
| 4.364109
| 0.03212
| 0.024002
| 0.032002
| 0.014943
| 0.917729
| 0.904782
| 0.898972
| 0.890256
| 0.885355
| 0.881079
| 0
| 0.026746
| 0.245397
| 112,275
| 2,279
| 490
| 49.265029
| 0.765483
| 0.143353
| 0
| 0.839945
| 0
| 0.000684
| 0.046411
| 0.007322
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011628
| false
| 0
| 0.010944
| 0
| 0.034884
| 0.01368
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8b1d9ea28050f3a7c638ec643d8ee847a4bcc10a
| 185,462
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_sysadmin_asic_errors_ael.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_sysadmin_asic_errors_ael.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_sysadmin_asic_errors_ael.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-07-22T04:04:44.000Z
|
2020-07-22T04:04:44.000Z
|
""" Cisco_IOS_XR_sysadmin_asic_errors_ael
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class AsicErrors(Entity):
"""
.. attribute:: device_name (key)
**type**\: str
.. attribute:: instance
**type**\: list of :py:class:`Instance <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance>`
.. attribute:: show_all_instances
**type**\: :py:class:`ShowAllInstances <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors, self).__init__()
self._top_entity = None
self.yang_name = "asic-errors"
self.yang_parent_name = "Cisco-IOS-XR-sysadmin-asic-errors-ael"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = ['device_name']
self._child_container_classes = OrderedDict([("show-all-instances", ("show_all_instances", AsicErrors.ShowAllInstances))])
self._child_list_classes = OrderedDict([("instance", ("instance", AsicErrors.Instance))])
self._leafs = OrderedDict([
('device_name', YLeaf(YType.str, 'device-name')),
])
self.device_name = None
self.show_all_instances = AsicErrors.ShowAllInstances()
self.show_all_instances.parent = self
self._children_name_map["show_all_instances"] = "show-all-instances"
self._children_yang_names.add("show-all-instances")
self.instance = YList(self)
self._segment_path = lambda: "Cisco-IOS-XR-sysadmin-asic-errors-ael:asic-errors" + "[device-name='" + str(self.device_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors, ['device_name'], name, value)
class Instance(Entity):
"""
.. attribute:: instance_num (key)
**type**\: int
**range:** 0..4294967295
.. attribute:: sbe
**type**\: :py:class:`Sbe <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Sbe>`
.. attribute:: mbe
**type**\: :py:class:`Mbe <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Mbe>`
.. attribute:: parity
**type**\: :py:class:`Parity <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Parity>`
.. attribute:: generic
**type**\: :py:class:`Generic <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Generic>`
.. attribute:: crc
**type**\: :py:class:`Crc <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Crc>`
.. attribute:: reset
**type**\: :py:class:`Reset <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Reset>`
.. attribute:: barrier
**type**\: :py:class:`Barrier <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Barrier>`
.. attribute:: unexpected
**type**\: :py:class:`Unexpected <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Unexpected>`
.. attribute:: link
**type**\: :py:class:`Link <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Link>`
.. attribute:: oor_thresh
**type**\: :py:class:`OorThresh <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.OorThresh>`
.. attribute:: bp
**type**\: :py:class:`Bp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Bp>`
.. attribute:: io
**type**\: :py:class:`Io <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Io>`
.. attribute:: ucode
**type**\: :py:class:`Ucode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Ucode>`
.. attribute:: config
**type**\: :py:class:`Config <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Config>`
.. attribute:: indirect
**type**\: :py:class:`Indirect <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Indirect>`
.. attribute:: nonerr
**type**\: :py:class:`Nonerr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Nonerr>`
.. attribute:: summary
**type**\: :py:class:`Summary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Summary>`
.. attribute:: all
**type**\: :py:class:`All <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.All>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance, self).__init__()
self.yang_name = "instance"
self.yang_parent_name = "asic-errors"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['instance_num']
self._child_container_classes = OrderedDict([("sbe", ("sbe", AsicErrors.Instance.Sbe)), ("mbe", ("mbe", AsicErrors.Instance.Mbe)), ("parity", ("parity", AsicErrors.Instance.Parity)), ("generic", ("generic", AsicErrors.Instance.Generic)), ("crc", ("crc", AsicErrors.Instance.Crc)), ("reset", ("reset", AsicErrors.Instance.Reset)), ("barrier", ("barrier", AsicErrors.Instance.Barrier)), ("unexpected", ("unexpected", AsicErrors.Instance.Unexpected)), ("link", ("link", AsicErrors.Instance.Link)), ("oor-thresh", ("oor_thresh", AsicErrors.Instance.OorThresh)), ("bp", ("bp", AsicErrors.Instance.Bp)), ("io", ("io", AsicErrors.Instance.Io)), ("ucode", ("ucode", AsicErrors.Instance.Ucode)), ("config", ("config", AsicErrors.Instance.Config)), ("indirect", ("indirect", AsicErrors.Instance.Indirect)), ("nonerr", ("nonerr", AsicErrors.Instance.Nonerr)), ("summary", ("summary", AsicErrors.Instance.Summary)), ("all", ("all", AsicErrors.Instance.All))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('instance_num', YLeaf(YType.uint32, 'instance-num')),
])
self.instance_num = None
self.sbe = AsicErrors.Instance.Sbe()
self.sbe.parent = self
self._children_name_map["sbe"] = "sbe"
self._children_yang_names.add("sbe")
self.mbe = AsicErrors.Instance.Mbe()
self.mbe.parent = self
self._children_name_map["mbe"] = "mbe"
self._children_yang_names.add("mbe")
self.parity = AsicErrors.Instance.Parity()
self.parity.parent = self
self._children_name_map["parity"] = "parity"
self._children_yang_names.add("parity")
self.generic = AsicErrors.Instance.Generic()
self.generic.parent = self
self._children_name_map["generic"] = "generic"
self._children_yang_names.add("generic")
self.crc = AsicErrors.Instance.Crc()
self.crc.parent = self
self._children_name_map["crc"] = "crc"
self._children_yang_names.add("crc")
self.reset = AsicErrors.Instance.Reset()
self.reset.parent = self
self._children_name_map["reset"] = "reset"
self._children_yang_names.add("reset")
self.barrier = AsicErrors.Instance.Barrier()
self.barrier.parent = self
self._children_name_map["barrier"] = "barrier"
self._children_yang_names.add("barrier")
self.unexpected = AsicErrors.Instance.Unexpected()
self.unexpected.parent = self
self._children_name_map["unexpected"] = "unexpected"
self._children_yang_names.add("unexpected")
self.link = AsicErrors.Instance.Link()
self.link.parent = self
self._children_name_map["link"] = "link"
self._children_yang_names.add("link")
self.oor_thresh = AsicErrors.Instance.OorThresh()
self.oor_thresh.parent = self
self._children_name_map["oor_thresh"] = "oor-thresh"
self._children_yang_names.add("oor-thresh")
self.bp = AsicErrors.Instance.Bp()
self.bp.parent = self
self._children_name_map["bp"] = "bp"
self._children_yang_names.add("bp")
self.io = AsicErrors.Instance.Io()
self.io.parent = self
self._children_name_map["io"] = "io"
self._children_yang_names.add("io")
self.ucode = AsicErrors.Instance.Ucode()
self.ucode.parent = self
self._children_name_map["ucode"] = "ucode"
self._children_yang_names.add("ucode")
self.config = AsicErrors.Instance.Config()
self.config.parent = self
self._children_name_map["config"] = "config"
self._children_yang_names.add("config")
self.indirect = AsicErrors.Instance.Indirect()
self.indirect.parent = self
self._children_name_map["indirect"] = "indirect"
self._children_yang_names.add("indirect")
self.nonerr = AsicErrors.Instance.Nonerr()
self.nonerr.parent = self
self._children_name_map["nonerr"] = "nonerr"
self._children_yang_names.add("nonerr")
self.summary = AsicErrors.Instance.Summary()
self.summary.parent = self
self._children_name_map["summary"] = "summary"
self._children_yang_names.add("summary")
self.all = AsicErrors.Instance.All()
self.all.parent = self
self._children_name_map["all"] = "all"
self._children_yang_names.add("all")
self._segment_path = lambda: "instance" + "[instance-num='" + str(self.instance_num) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance, ['instance_num'], name, value)
class Sbe(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Sbe.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Sbe, self).__init__()
self.yang_name = "sbe"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Sbe.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "sbe"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Sbe, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Sbe.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Sbe.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "sbe"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Sbe.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Sbe.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Sbe.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Sbe.Location.LogLst, ['log_line'], name, value)
class Mbe(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Mbe.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Mbe, self).__init__()
self.yang_name = "mbe"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Mbe.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "mbe"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Mbe, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Mbe.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Mbe.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "mbe"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Mbe.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Mbe.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Mbe.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Mbe.Location.LogLst, ['log_line'], name, value)
class Parity(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Parity.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Parity, self).__init__()
self.yang_name = "parity"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Parity.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "parity"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Parity, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Parity.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Parity.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "parity"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Parity.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Parity.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Parity.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Parity.Location.LogLst, ['log_line'], name, value)
class Generic(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Generic.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Generic, self).__init__()
self.yang_name = "generic"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Generic.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "generic"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Generic, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Generic.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Generic.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "generic"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Generic.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Generic.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Generic.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Generic.Location.LogLst, ['log_line'], name, value)
class Crc(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Crc.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Crc, self).__init__()
self.yang_name = "crc"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Crc.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "crc"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Crc, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Crc.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Crc.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "crc"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Crc.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Crc.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Crc.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Crc.Location.LogLst, ['log_line'], name, value)
class Reset(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Reset.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Reset, self).__init__()
self.yang_name = "reset"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Reset.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "reset"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Reset, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Reset.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Reset.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "reset"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Reset.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Reset.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Reset.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Reset.Location.LogLst, ['log_line'], name, value)
class Barrier(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Barrier.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Barrier, self).__init__()
self.yang_name = "barrier"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Barrier.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "barrier"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Barrier, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Barrier.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Barrier.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "barrier"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Barrier.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Barrier.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Barrier.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Barrier.Location.LogLst, ['log_line'], name, value)
class Unexpected(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Unexpected.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Unexpected, self).__init__()
self.yang_name = "unexpected"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Unexpected.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "unexpected"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Unexpected, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Unexpected.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Unexpected.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "unexpected"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Unexpected.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Unexpected.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Unexpected.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Unexpected.Location.LogLst, ['log_line'], name, value)
class Link(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Link.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Link, self).__init__()
self.yang_name = "link"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Link.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "link"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Link, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Link.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Link.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "link"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Link.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Link.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Link.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Link.Location.LogLst, ['log_line'], name, value)
class OorThresh(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.OorThresh.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.OorThresh, self).__init__()
self.yang_name = "oor-thresh"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.OorThresh.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "oor-thresh"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.OorThresh, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.OorThresh.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.OorThresh.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "oor-thresh"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.OorThresh.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.OorThresh.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.OorThresh.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.OorThresh.Location.LogLst, ['log_line'], name, value)
class Bp(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Bp.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Bp, self).__init__()
self.yang_name = "bp"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Bp.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "bp"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Bp, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Bp.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Bp.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "bp"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Bp.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Bp.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Bp.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Bp.Location.LogLst, ['log_line'], name, value)
class Io(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Io.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Io, self).__init__()
self.yang_name = "io"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Io.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "io"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Io, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Io.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Io.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "io"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Io.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Io.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Io.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Io.Location.LogLst, ['log_line'], name, value)
class Ucode(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Ucode.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Ucode, self).__init__()
self.yang_name = "ucode"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Ucode.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "ucode"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Ucode, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Ucode.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Ucode.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "ucode"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Ucode.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Ucode.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Ucode.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Ucode.Location.LogLst, ['log_line'], name, value)
class Config(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Config.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Config, self).__init__()
self.yang_name = "config"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Config.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "config"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Config, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Config.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Config.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "config"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Config.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Config.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Config.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Config.Location.LogLst, ['log_line'], name, value)
class Indirect(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Indirect.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Indirect, self).__init__()
self.yang_name = "indirect"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Indirect.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "indirect"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Indirect, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Indirect.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Indirect.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "indirect"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Indirect.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Indirect.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Indirect.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Indirect.Location.LogLst, ['log_line'], name, value)
class Nonerr(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Nonerr.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Nonerr, self).__init__()
self.yang_name = "nonerr"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Nonerr.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "nonerr"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Nonerr, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Nonerr.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Nonerr.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "nonerr"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Nonerr.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Nonerr.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Nonerr.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Nonerr.Location.LogLst, ['log_line'], name, value)
class Summary(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Summary.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Summary, self).__init__()
self.yang_name = "summary"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.Summary.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "summary"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Summary, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.Summary.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Summary.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "summary"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.Summary.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Summary.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.Summary.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.Summary.Location.LogLst, ['log_line'], name, value)
class All(Entity):
"""
.. attribute:: history
**type**\: :py:class:`History <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.All.History>`
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.All.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.All, self).__init__()
self.yang_name = "all"
self.yang_parent_name = "instance"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("history", ("history", AsicErrors.Instance.All.History))])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.All.Location))])
self._leafs = OrderedDict()
self.history = AsicErrors.Instance.All.History()
self.history.parent = self
self._children_name_map["history"] = "history"
self._children_yang_names.add("history")
self.location = YList(self)
self._segment_path = lambda: "all"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.All, [], name, value)
class History(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.All.History.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.All.History, self).__init__()
self.yang_name = "history"
self.yang_parent_name = "all"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.Instance.All.History.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "history"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.All.History, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.All.History.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.All.History.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "history"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.All.History.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.All.History.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.All.History.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.All.History.Location.LogLst, ['log_line'], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.Instance.All.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.All.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "all"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.Instance.All.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.All.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.Instance.All.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.Instance.All.Location.LogLst, ['log_line'], name, value)
class ShowAllInstances(Entity):
"""
.. attribute:: sbe
**type**\: :py:class:`Sbe <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Sbe>`
.. attribute:: mbe
**type**\: :py:class:`Mbe <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Mbe>`
.. attribute:: parity
**type**\: :py:class:`Parity <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Parity>`
.. attribute:: generic
**type**\: :py:class:`Generic <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Generic>`
.. attribute:: crc
**type**\: :py:class:`Crc <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Crc>`
.. attribute:: reset
**type**\: :py:class:`Reset <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Reset>`
.. attribute:: barrier
**type**\: :py:class:`Barrier <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Barrier>`
.. attribute:: unexpected
**type**\: :py:class:`Unexpected <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Unexpected>`
.. attribute:: link
**type**\: :py:class:`Link <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Link>`
.. attribute:: oor_thresh
**type**\: :py:class:`OorThresh <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.OorThresh>`
.. attribute:: bp
**type**\: :py:class:`Bp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Bp>`
.. attribute:: io
**type**\: :py:class:`Io <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Io>`
.. attribute:: ucode
**type**\: :py:class:`Ucode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Ucode>`
.. attribute:: config
**type**\: :py:class:`Config <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Config>`
.. attribute:: indirect
**type**\: :py:class:`Indirect <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Indirect>`
.. attribute:: nonerr
**type**\: :py:class:`Nonerr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Nonerr>`
.. attribute:: summary
**type**\: :py:class:`Summary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Summary>`
.. attribute:: all
**type**\: :py:class:`All <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.All>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances, self).__init__()
self.yang_name = "show-all-instances"
self.yang_parent_name = "asic-errors"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("sbe", ("sbe", AsicErrors.ShowAllInstances.Sbe)), ("mbe", ("mbe", AsicErrors.ShowAllInstances.Mbe)), ("parity", ("parity", AsicErrors.ShowAllInstances.Parity)), ("generic", ("generic", AsicErrors.ShowAllInstances.Generic)), ("crc", ("crc", AsicErrors.ShowAllInstances.Crc)), ("reset", ("reset", AsicErrors.ShowAllInstances.Reset)), ("barrier", ("barrier", AsicErrors.ShowAllInstances.Barrier)), ("unexpected", ("unexpected", AsicErrors.ShowAllInstances.Unexpected)), ("link", ("link", AsicErrors.ShowAllInstances.Link)), ("oor-thresh", ("oor_thresh", AsicErrors.ShowAllInstances.OorThresh)), ("bp", ("bp", AsicErrors.ShowAllInstances.Bp)), ("io", ("io", AsicErrors.ShowAllInstances.Io)), ("ucode", ("ucode", AsicErrors.ShowAllInstances.Ucode)), ("config", ("config", AsicErrors.ShowAllInstances.Config)), ("indirect", ("indirect", AsicErrors.ShowAllInstances.Indirect)), ("nonerr", ("nonerr", AsicErrors.ShowAllInstances.Nonerr)), ("summary", ("summary", AsicErrors.ShowAllInstances.Summary)), ("all", ("all", AsicErrors.ShowAllInstances.All))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.sbe = AsicErrors.ShowAllInstances.Sbe()
self.sbe.parent = self
self._children_name_map["sbe"] = "sbe"
self._children_yang_names.add("sbe")
self.mbe = AsicErrors.ShowAllInstances.Mbe()
self.mbe.parent = self
self._children_name_map["mbe"] = "mbe"
self._children_yang_names.add("mbe")
self.parity = AsicErrors.ShowAllInstances.Parity()
self.parity.parent = self
self._children_name_map["parity"] = "parity"
self._children_yang_names.add("parity")
self.generic = AsicErrors.ShowAllInstances.Generic()
self.generic.parent = self
self._children_name_map["generic"] = "generic"
self._children_yang_names.add("generic")
self.crc = AsicErrors.ShowAllInstances.Crc()
self.crc.parent = self
self._children_name_map["crc"] = "crc"
self._children_yang_names.add("crc")
self.reset = AsicErrors.ShowAllInstances.Reset()
self.reset.parent = self
self._children_name_map["reset"] = "reset"
self._children_yang_names.add("reset")
self.barrier = AsicErrors.ShowAllInstances.Barrier()
self.barrier.parent = self
self._children_name_map["barrier"] = "barrier"
self._children_yang_names.add("barrier")
self.unexpected = AsicErrors.ShowAllInstances.Unexpected()
self.unexpected.parent = self
self._children_name_map["unexpected"] = "unexpected"
self._children_yang_names.add("unexpected")
self.link = AsicErrors.ShowAllInstances.Link()
self.link.parent = self
self._children_name_map["link"] = "link"
self._children_yang_names.add("link")
self.oor_thresh = AsicErrors.ShowAllInstances.OorThresh()
self.oor_thresh.parent = self
self._children_name_map["oor_thresh"] = "oor-thresh"
self._children_yang_names.add("oor-thresh")
self.bp = AsicErrors.ShowAllInstances.Bp()
self.bp.parent = self
self._children_name_map["bp"] = "bp"
self._children_yang_names.add("bp")
self.io = AsicErrors.ShowAllInstances.Io()
self.io.parent = self
self._children_name_map["io"] = "io"
self._children_yang_names.add("io")
self.ucode = AsicErrors.ShowAllInstances.Ucode()
self.ucode.parent = self
self._children_name_map["ucode"] = "ucode"
self._children_yang_names.add("ucode")
self.config = AsicErrors.ShowAllInstances.Config()
self.config.parent = self
self._children_name_map["config"] = "config"
self._children_yang_names.add("config")
self.indirect = AsicErrors.ShowAllInstances.Indirect()
self.indirect.parent = self
self._children_name_map["indirect"] = "indirect"
self._children_yang_names.add("indirect")
self.nonerr = AsicErrors.ShowAllInstances.Nonerr()
self.nonerr.parent = self
self._children_name_map["nonerr"] = "nonerr"
self._children_yang_names.add("nonerr")
self.summary = AsicErrors.ShowAllInstances.Summary()
self.summary.parent = self
self._children_name_map["summary"] = "summary"
self._children_yang_names.add("summary")
self.all = AsicErrors.ShowAllInstances.All()
self.all.parent = self
self._children_name_map["all"] = "all"
self._children_yang_names.add("all")
self._segment_path = lambda: "show-all-instances"
class Sbe(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Sbe.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Sbe, self).__init__()
self.yang_name = "sbe"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Sbe.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "sbe"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Sbe, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Sbe.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Sbe.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "sbe"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Sbe.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Sbe.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Sbe.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Sbe.Location.LogLst, ['log_line'], name, value)
class Mbe(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Mbe.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Mbe, self).__init__()
self.yang_name = "mbe"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Mbe.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "mbe"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Mbe, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Mbe.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Mbe.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "mbe"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Mbe.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Mbe.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Mbe.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Mbe.Location.LogLst, ['log_line'], name, value)
class Parity(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Parity.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Parity, self).__init__()
self.yang_name = "parity"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Parity.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "parity"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Parity, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Parity.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Parity.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "parity"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Parity.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Parity.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Parity.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Parity.Location.LogLst, ['log_line'], name, value)
class Generic(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Generic.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Generic, self).__init__()
self.yang_name = "generic"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Generic.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "generic"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Generic, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Generic.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Generic.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "generic"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Generic.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Generic.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Generic.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Generic.Location.LogLst, ['log_line'], name, value)
class Crc(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Crc.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Crc, self).__init__()
self.yang_name = "crc"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Crc.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "crc"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Crc, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Crc.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Crc.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "crc"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Crc.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Crc.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Crc.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Crc.Location.LogLst, ['log_line'], name, value)
class Reset(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Reset.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Reset, self).__init__()
self.yang_name = "reset"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Reset.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "reset"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Reset, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Reset.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Reset.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "reset"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Reset.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Reset.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Reset.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Reset.Location.LogLst, ['log_line'], name, value)
class Barrier(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Barrier.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Barrier, self).__init__()
self.yang_name = "barrier"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Barrier.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "barrier"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Barrier, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Barrier.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Barrier.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "barrier"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Barrier.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Barrier.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Barrier.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Barrier.Location.LogLst, ['log_line'], name, value)
class Unexpected(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Unexpected.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Unexpected, self).__init__()
self.yang_name = "unexpected"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Unexpected.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "unexpected"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Unexpected, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Unexpected.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Unexpected.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "unexpected"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Unexpected.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Unexpected.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Unexpected.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Unexpected.Location.LogLst, ['log_line'], name, value)
class Link(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Link.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Link, self).__init__()
self.yang_name = "link"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Link.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "link"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Link, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Link.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Link.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "link"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Link.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Link.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Link.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Link.Location.LogLst, ['log_line'], name, value)
class OorThresh(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.OorThresh.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.OorThresh, self).__init__()
self.yang_name = "oor-thresh"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.OorThresh.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "oor-thresh"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.OorThresh, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.OorThresh.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.OorThresh.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "oor-thresh"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.OorThresh.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.OorThresh.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.OorThresh.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.OorThresh.Location.LogLst, ['log_line'], name, value)
class Bp(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Bp.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Bp, self).__init__()
self.yang_name = "bp"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Bp.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "bp"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Bp, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Bp.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Bp.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "bp"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Bp.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Bp.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Bp.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Bp.Location.LogLst, ['log_line'], name, value)
class Io(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Io.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Io, self).__init__()
self.yang_name = "io"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Io.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "io"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Io, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Io.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Io.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "io"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Io.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Io.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Io.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Io.Location.LogLst, ['log_line'], name, value)
class Ucode(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Ucode.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Ucode, self).__init__()
self.yang_name = "ucode"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Ucode.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "ucode"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Ucode, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Ucode.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Ucode.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "ucode"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Ucode.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Ucode.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Ucode.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Ucode.Location.LogLst, ['log_line'], name, value)
class Config(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Config.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Config, self).__init__()
self.yang_name = "config"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Config.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "config"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Config, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Config.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Config.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "config"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Config.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Config.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Config.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Config.Location.LogLst, ['log_line'], name, value)
class Indirect(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Indirect.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Indirect, self).__init__()
self.yang_name = "indirect"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Indirect.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "indirect"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Indirect, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Indirect.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Indirect.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "indirect"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Indirect.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Indirect.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Indirect.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Indirect.Location.LogLst, ['log_line'], name, value)
class Nonerr(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Nonerr.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Nonerr, self).__init__()
self.yang_name = "nonerr"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Nonerr.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "nonerr"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Nonerr, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Nonerr.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Nonerr.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "nonerr"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Nonerr.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Nonerr.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Nonerr.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Nonerr.Location.LogLst, ['log_line'], name, value)
class Summary(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Summary.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Summary, self).__init__()
self.yang_name = "summary"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.Summary.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "summary"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Summary, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.Summary.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Summary.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "summary"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.Summary.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Summary.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.Summary.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.Summary.Location.LogLst, ['log_line'], name, value)
class All(Entity):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.All.Location>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.All, self).__init__()
self.yang_name = "all"
self.yang_parent_name = "show-all-instances"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", AsicErrors.ShowAllInstances.All.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "all"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.All, [], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
**pattern:** ((([fF][0\-3])/(([a\-zA\-Z]){2}\\d{1,2}))\|((0?[0\-9]\|1[1\-5])/((([a\-zA\-Z]){2,3})?\\d{1,2})))(/[cC][pP][uU]0)?
.. attribute:: log_lst
**type**\: list of :py:class:`LogLst <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_asic_errors_ael.AsicErrors.ShowAllInstances.All.Location.LogLst>`
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.All.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "all"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("log-lst", ("log_lst", AsicErrors.ShowAllInstances.All.Location.LogLst))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location-name')),
])
self.location_name = None
self.log_lst = YList(self)
self._segment_path = lambda: "location" + "[location-name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.All.Location, ['location_name'], name, value)
class LogLst(Entity):
"""
.. attribute:: log_line
**type**\: str
"""
_prefix = 'ael'
_revision = '2017-07-05'
def __init__(self):
super(AsicErrors.ShowAllInstances.All.Location.LogLst, self).__init__()
self.yang_name = "log-lst"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('log_line', YLeaf(YType.str, 'log-line')),
])
self.log_line = None
self._segment_path = lambda: "log-lst"
def __setattr__(self, name, value):
self._perform_setattr(AsicErrors.ShowAllInstances.All.Location.LogLst, ['log_line'], name, value)
def clone_ptr(self):
self._top_entity = AsicErrors()
return self._top_entity
| 38.670142
| 1,103
| 0.47111
| 16,119
| 185,462
| 5.070104
| 0.008127
| 0.043463
| 0.028021
| 0.025549
| 0.967048
| 0.963047
| 0.957565
| 0.955583
| 0.954751
| 0.953429
| 0
| 0.014012
| 0.415492
| 185,462
| 4,795
| 1,104
| 38.678206
| 0.73988
| 0.165074
| 0
| 0.832896
| 0
| 0
| 0.074048
| 0.00064
| 0
| 0
| 0
| 0
| 0
| 1
| 0.099738
| false
| 0
| 0.002187
| 0
| 0.153106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8b358587d6f4f87adcfdb624124e8be84f4de464
| 1,946
|
py
|
Python
|
ltr/model/head/classifier.py
|
DeepBrainsMe/PyDoctor_Final
|
49ecfc64b2a2866e7f37cc79c1f32a817975f064
|
[
"MIT"
] | 1
|
2021-05-19T06:46:05.000Z
|
2021-05-19T06:46:05.000Z
|
ltr/model/head/classifier.py
|
DeepBrainsMe/PyDoctor_Final
|
49ecfc64b2a2866e7f37cc79c1f32a817975f064
|
[
"MIT"
] | null | null | null |
ltr/model/head/classifier.py
|
DeepBrainsMe/PyDoctor_Final
|
49ecfc64b2a2866e7f37cc79c1f32a817975f064
|
[
"MIT"
] | null | null | null |
import math
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self,num_classes = 2):
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class Classifier_50(nn.Module):
def __init__(self,num_classes = 2):
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(2048, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class SiamClassifier(nn.Module):
def __init__(self,num_classes = 2):
super().__init__()
# self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
if m.bias is not None:
m.bias.data.zero_()
def forward(self, sag_feat,ax_feat):
print(sag_feat.shape())
print(ax_feat.shape())
# x = x.view(x.size(0), -1)
# x = self.fc(x)
return None
| 31.387097
| 105
| 0.562179
| 273
| 1,946
| 3.835165
| 0.201465
| 0.094556
| 0.111748
| 0.08596
| 0.883477
| 0.883477
| 0.883477
| 0.883477
| 0.883477
| 0.883477
| 0
| 0.026568
| 0.3037
| 1,946
| 62
| 106
| 31.387097
| 0.746125
| 0.062693
| 0
| 0.75
| 0
| 0
| 0.00989
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.045455
| 0
| 0.318182
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8cbf61f2ed2652f05490036976f015c9154ce044
| 10,171
|
py
|
Python
|
Projects/ABM_DA/bussim/A02_doing_nothing_analysis.py
|
RobertClay/DUST-RC
|
09f7ec9d8d093021d068dff8a7a48c15ea318b86
|
[
"MIT"
] | 15
|
2018-11-21T14:57:24.000Z
|
2022-03-04T15:42:09.000Z
|
Projects/ABM_DA/bussim/A02_doing_nothing_analysis.py
|
RobertClay/DUST-RC
|
09f7ec9d8d093021d068dff8a7a48c15ea318b86
|
[
"MIT"
] | 125
|
2019-11-06T13:03:35.000Z
|
2022-03-07T13:38:33.000Z
|
Projects/ABM_DA/bussim/A02_doing_nothing_analysis.py
|
RobertClay/DUST-RC
|
09f7ec9d8d093021d068dff8a7a48c15ea318b86
|
[
"MIT"
] | 6
|
2018-11-20T15:56:49.000Z
|
2021-10-08T10:21:06.000Z
|
# -*- coding: utf-8 -*-
"""
This code will analyse the modelling results of BusSim and plot it along side a number of uncalibrated models (timespace diagram of bus trajectories)
@author: geomlk
"""
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
os.chdir("/Users/minhkieu/Documents/Github/dust/Projects/ABM_DA/bussim/")
#Step 1: Load calibration results
def load_actual_params_IncreaseRate(IncreaseRate):
#load up a model from a Pickle
#with open('C:/Users/geomlk/Dropbox/Minh_UoL/DA/ABM/BusSim/Data/Realtime_data_IncreaseRate_9.pkl','rb') as f2:
name0 = ['./Data/Realtime_data_IncreaseRate_',str(IncreaseRate),'.pkl']
str1 = ''.join(name0)
with open(str1, 'rb') as f:
model_params,t,x,GroundTruth = pickle.load(f)
return model_params,t,x
def load_actual_params_maxDemand(maxDemand):
#load up a model from a Pickle
#with open('C:/Users/geomlk/Dropbox/Minh_UoL/DA/ABM/BusSim/Data/Realtime_data_IncreaseRate_9.pkl','rb') as f2:
name0 = ['./Data/Realtime_data_static_maxDemand_',str(maxDemand),'.pkl']
str1 = ''.join(name0)
with open(str1, 'rb') as f:
model_params,t,x,GroundTruth = pickle.load(f)
return model_params,t,x
def rmse(yhat,y): #define the RMSE function
return np.sqrt(np.square(np.subtract(yhat, y).mean()))
def IncreaseRate_analysis(): #this is the code to analyse the simulation results when the demand increase by 1 to 20%
Results = [0,0]
do_plot=True
for IncreaseRate in range(1,20,1):
#load the synthetic real-time GPS
model_params, t,x = load_actual_params_IncreaseRate(IncreaseRate)
#define parameters for simulation
NumberOfStop=20
minDemand=0.5
maxDemand=2
#Initialise the ArrivalRate and DepartureRate
ArrivalRate = np.random.uniform(minDemand / 60, maxDemand / 60, NumberOfStop)
DepartureRate = np.sort(np.random.uniform(0.05, 0.5,NumberOfStop))
DepartureRate[0]=0
TrafficSpeed = np.random.uniform(11, 17)
#Initialise the model parameters
model_params = {
"dt": 10,
"minDemand":minDemand,
"NumberOfStop": NumberOfStop,
"LengthBetweenStop": 2000,
"EndTime": 6000,
"Headway": 5 * 60,
"BurnIn": 1 * 60,
"AlightTime": 1,
"BoardTime": 3,
"StoppingTime": 3,
"BusAcceleration": 3 # m/s
}
'''run BusSim-deterministic with random parameters'''
from BusSim_deterministic import Model as Model1
model = Model1(model_params, TrafficSpeed,ArrivalRate,DepartureRate)
for time_step in range(int(model.EndTime / model.dt)):
model.step()
x3 = np.array([bus.trajectory for bus in model.buses]).T
t3 = np.arange(0, model.EndTime, model.dt)
x3[x3 <= 0 ] = np.nan
x3[x3 >= (model.NumberOfStop * model.LengthBetweenStop)] = np.nan
'''run BusSim-stochastic with random parameters'''
ArrivalRate = np.random.uniform(minDemand / 60, maxDemand / 60, NumberOfStop)
DepartureRate = np.sort(np.random.uniform(0.05, 0.5,NumberOfStop))
DepartureRate[0]=0
TrafficSpeed = np.random.uniform(11, 17)
from BusSim_stochastic import Model as Model2
model = Model2(model_params, TrafficSpeed,ArrivalRate,DepartureRate)
for time_step in range(int(model.EndTime / model.dt)):
model.step()
x2 = np.array([bus.trajectory for bus in model.buses]).T
t2 = np.arange(0, model.EndTime, model.dt)
x2[x2 <= 0 ] = np.nan
x2[x2 >= (model.NumberOfStop * model.LengthBetweenStop)] = np.nan
''' we may plot individual run if it's needed'''
if do_plot:
plt.figure(3, figsize=(16 / 2, 9 / 2))
plt.clf()
plt.ylabel('Distance (m)')
plt.xlabel('Time (s)')
plt.plot(t3, x3, linewidth=1.5,linestyle = '--',color='b')
plt.plot(t2, x2, linewidth=1.5,linestyle = ':',color='r')
plt.plot(t, x, linewidth=1,color='black',linestyle = '-')
plt.plot([], [], linewidth=1.5,linestyle = '--',color='b',label='BusSim-deterministic')
plt.plot([], [], linewidth=1.5,linestyle = ':',color='r',label='BusSim-stochastic')
plt.plot([], [], linewidth=1,color='black',linestyle = '-',label='Real-time')
plt.legend()
plt.show()
name0 = ['./Figures/Fig_do_nothing_IncreaseRate_',str(IncreaseRate),'.pdf']
str1 = ''.join(name0)
plt.savefig(str1, dpi=200,bbox_inches='tight')
'''collect outputs data and calculate RMSE'''
x3[np.isnan(x3)]=0
x2[np.isnan(x2)]=0
x[np.isnan(x)]=0
RMSE1 = rmse(x3,x)
RMSE2 = rmse(x2,x)
Results = np.vstack((Results,[RMSE1,RMSE2]))
''' this plot is the main results plot'''
do_plot_results=True
if do_plot_results:
plt.figure(3, figsize=(16 / 2, 9 / 2))
plt.clf()
plt.plot(np.arange(1,20,1),Results[1:,0],linewidth=1.5,linestyle = '--',color='b',label='BusSim-deterministic')
plt.plot(np.arange(1,20,1),Results[1:,1],linewidth=1.5,linestyle = ':',color='r',label='BusSim-stochastic')
plt.ylabel('RMSE (m)')
plt.xlabel(r'$\xi$ (%)')
plt.legend()
plt.show()
plt.savefig('./Figures/Fig_do_nothing_results.pdf', dpi=200,bbox_inches='tight')
return Results
'''
Function to evaluate results when the maximum demand increases from 0.5 to 4.5
'''
def maxDemand_analysis():
Results = [0,0]
do_plot=False
for maxDemand in range(1,10,1):
maxDemand =maxDemand/2
model_params, t,x = load_actual_params_maxDemand(maxDemand)
NumberOfStop=20
minDemand=0.5
#Initialise the ArrivalRate and DepartureRate
ArrivalRate = np.random.uniform(minDemand / 60, maxDemand / 60, NumberOfStop)
DepartureRate = np.sort(np.random.uniform(0.05, 0.5,NumberOfStop))
DepartureRate[0]=0
TrafficSpeed = np.random.uniform(11, 17)
#Initialise the model parameters
model_params = {
"dt": 10,
"minDemand":minDemand,
"NumberOfStop": NumberOfStop,
"LengthBetweenStop": 2000,
"EndTime": 6000,
"Headway": 5 * 60,
"BurnIn": 1 * 60,
"AlightTime": 1,
"BoardTime": 3,
"StoppingTime": 3,
"BusAcceleration": 3 # m/s
}
'''run BusSim-deterministic with random parameters'''
from BusSim_deterministic import Model as Model1
model = Model1(model_params, TrafficSpeed,ArrivalRate,DepartureRate)
for time_step in range(int(model.EndTime / model.dt)):
model.step()
x3 = np.array([bus.trajectory for bus in model.buses]).T
t3 = np.arange(0, model.EndTime, model.dt)
x3[x3 <= 0 ] = np.nan
x3[x3 >= (model.NumberOfStop * model.LengthBetweenStop)] = np.nan
ArrivalRate = np.random.uniform(minDemand / 60, maxDemand / 60, NumberOfStop)
DepartureRate = np.sort(np.random.uniform(0.05, 0.5,NumberOfStop))
DepartureRate[0]=0
TrafficSpeed = np.random.uniform(11, 17)
'''run BusSim-stochastic with random parameters'''
from BusSim_stochastic import Model as Model2
model = Model2(model_params, TrafficSpeed,ArrivalRate,DepartureRate)
for time_step in range(int(model.EndTime / model.dt)):
model.step()
x2 = np.array([bus.trajectory for bus in model.buses]).T
t2 = np.arange(0, model.EndTime, model.dt)
x2[x2 <= 0 ] = np.nan
x2[x2 >= (model.NumberOfStop * model.LengthBetweenStop)] = np.nan
''' we may plot individual run if it's needed'''
if do_plot:
plt.figure(3, figsize=(16 / 2, 9 / 2))
plt.clf()
plt.ylabel('Distance (m)')
plt.xlabel('Time (s)')
plt.plot(t3, x3, linewidth=.5,linestyle = '--',color='b')
plt.plot(t2, x2, linewidth=1,linestyle = ':',color='r')
plt.plot(t, x, linewidth=1,color='black',linestyle = '-')
plt.plot([], [], linewidth=.5,linestyle = '--',color='b',label='BusSim-deterministic')
plt.plot([], [], linewidth=1,linestyle = ':',color='r',label='BusSim-stochastic')
plt.plot([], [], linewidth=1,color='black',linestyle = '-',label='Real-time')
plt.legend()
plt.show()
name0 = ['./Figures/Fig_do_nothing_maxDemand_',str(maxDemand),'.pdf']
str1 = ''.join(name0)
plt.savefig(str1, dpi=200,bbox_inches='tight')
'''collect outputs data and calculate RMSE'''
x3[np.isnan(x3)]=0
x2[np.isnan(x2)]=0
x[np.isnan(x)]=0
RMSE1 = rmse(x3,x)
RMSE2 = rmse(x2,x)
Results = np.vstack((Results,[RMSE1,RMSE2]))
''' this plot is the main results plot'''
do_plot_results=True
if do_plot_results:
plt.figure(3, figsize=(16 / 2, 9 / 2))
plt.clf()
plt.plot(np.arange(1,10,1),Results[1:,0],linewidth=1.5,linestyle = '--',color='b',label='BusSim-deterministic')
plt.plot(np.arange(1,10,1),Results[1:,1],linewidth=1.5,linestyle = ':',color='r',label='BusSim-stochastic')
plt.ylabel('RMSE (m)')
plt.xlabel(r'$maxDemand$ (passenger/min)')
plt.xticks(np.arange(1,10,1), (np.arange(1,10,1)/2))
plt.legend()
plt.show()
plt.savefig('./Figures/Fig_do_nothing_results_maxDemand.pdf', dpi=200,bbox_inches='tight')
return Results
if __name__ == '__main__': #main function, just call one of the two evaluation
#Results=maxDemand_analysis()
Results=IncreaseRate_analysis()
| 42.735294
| 149
| 0.58657
| 1,266
| 10,171
| 4.6406
| 0.163507
| 0.019064
| 0.030638
| 0.025872
| 0.841532
| 0.804085
| 0.790468
| 0.780596
| 0.768
| 0.766638
| 0
| 0.042996
| 0.272835
| 10,171
| 238
| 150
| 42.735294
| 0.751352
| 0.092026
| 0
| 0.755556
| 0
| 0
| 0.09941
| 0.033291
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0.005556
| 0.044444
| 0.005556
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8cc014b294909148e5a81f4862f1d7650724fecc
| 98
|
py
|
Python
|
src/lib/pythonds/graphs/__init__.py
|
blockpy-edu/skulpt
|
dc70288aedcd7670605ef28f8525546440b39f93
|
[
"MIT"
] | 4
|
2020-01-19T01:42:06.000Z
|
2021-05-13T09:51:38.000Z
|
src/lib/pythonds/graphs/__init__.py
|
blockpy-edu/skulpt
|
dc70288aedcd7670605ef28f8525546440b39f93
|
[
"MIT"
] | null | null | null |
src/lib/pythonds/graphs/__init__.py
|
blockpy-edu/skulpt
|
dc70288aedcd7670605ef28f8525546440b39f93
|
[
"MIT"
] | 4
|
2019-10-16T21:50:53.000Z
|
2021-01-11T06:25:57.000Z
|
from .adjGraph import Graph
from .adjGraph import Vertex
from .priorityQueue import PriorityQueue
| 24.5
| 40
| 0.846939
| 12
| 98
| 6.916667
| 0.5
| 0.289157
| 0.433735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 98
| 3
| 41
| 32.666667
| 0.965116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8cdce95d38e767ed6b9c3a85d0ad34a63f36f6d4
| 60
|
py
|
Python
|
snidynatrace/wrappers/confluent_kafka/__init__.py
|
krb70/snidynatrace
|
fb22a29d9ad06dedc5b5c219e65244c8cc986dda
|
[
"MIT"
] | null | null | null |
snidynatrace/wrappers/confluent_kafka/__init__.py
|
krb70/snidynatrace
|
fb22a29d9ad06dedc5b5c219e65244c8cc986dda
|
[
"MIT"
] | null | null | null |
snidynatrace/wrappers/confluent_kafka/__init__.py
|
krb70/snidynatrace
|
fb22a29d9ad06dedc5b5c219e65244c8cc986dda
|
[
"MIT"
] | null | null | null |
from .wrapper import Producer
from .wrapper import Consumer
| 20
| 29
| 0.833333
| 8
| 60
| 6.25
| 0.625
| 0.44
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 60
| 2
| 30
| 30
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e83dcf446b299b3ff5377dfd1d27674e37272882
| 35,988
|
py
|
Python
|
pythonBackend/BaseUser.py
|
sajadgzd/softwareEngineeringProject
|
b2c4838b01ae4cb790a64e3c0d0a5bc959054bab
|
[
"MIT"
] | 5
|
2020-06-27T02:57:47.000Z
|
2022-01-12T22:14:08.000Z
|
pythonBackend/BaseUser.py
|
sajadgzd/softwareEngineeringProject
|
b2c4838b01ae4cb790a64e3c0d0a5bc959054bab
|
[
"MIT"
] | null | null | null |
pythonBackend/BaseUser.py
|
sajadgzd/softwareEngineeringProject
|
b2c4838b01ae4cb790a64e3c0d0a5bc959054bab
|
[
"MIT"
] | 2
|
2020-05-14T18:29:55.000Z
|
2020-05-17T05:59:05.000Z
|
import sqlite3
import json
from flask import Flask, jsonify, render_template, request, send_from_directory
import uuid
# ADJUST USER STATUS #~HELPER
def managePointStatus(email):
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM users WHERE [email] = ?",(email,))
userData = cursor.fetchone()
userData = list(userData)
status = userData[5]
points = userData[4]
if status == "VIP":
if points < 25:
userData[5] = "OU"
elif status == "OU":
if points > 30:
userData[5] = "VIP"
cursor.execute("DELETE * FROM users WHERE [email] = ?", (email,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(userData))
connection.commit()
connnection.close()
@app.route('/getUserData', methods = ["POST"])
def getUserData():
jsonData = request.json
email = jsonData["email"]
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM users WHERE [email] = ?",(email,))
userData = cursor.fetchone()
userData = list(userData)
userData[3] = json.loads(userData[3]) #grouplist
userData[6] = json.loads(userData[6]) #invitations
userData[7] = json.loads(userData[7]) #blacklist
userData[8] = json.loads(userData[8]) #whitelist
userData[10] = json.loads(userData[10]) #inbox
userData[11] = json.loads(userData[11]) #referredUsers
return (jsonify({
"userData": userData
}))
@app.route('/getGroupData', methods = ["POST"])
def getGroupData():
jsonData = request.json
groupName = jsonData["groupName"]
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM users WHERE [groupName] = ?",(groupName,))
groupData = cursor.fetchone()
groupData = list(groupData)
groupData[2] = json.loads(groupData[2]) #posts
groupData[3] = json.loads(groupData[3]) #member polls
groupData[4] = json.loads(groupData[4]) #group polls
groupData[5] = json.loads(groupData[5]) #member list
return (jsonify({
"groupData": groupData
}))
@app.route('/login', methods = ["POST"])
def login():
jsonData = request.json
email = jsonData["email"]
credentials = jsonData["credentials"]
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM users WHERE [email] = ? AND [credentials] = ?",(jsonData["email"],))
userData = cursor.fetchone()
userData = list(userData)
if userData is not None:
return jsonify({
"Sucess": "Welcome to Team Up!"
})
else:
return jsonify({
"Error": "Sorry, email or password combination does not exist."
})
@app.route('/inviteToGroup', methods = ["POST"])
def inviteToGroup():
#GET JSON DAT
jsonData = request.json
inviter = jsonData["inviterEmail"].lower()
inviterFullname = jsonData["inviterFullname"]
groupName = jsonData["groupName"]
invitee = jsonData["inviteeEmail"].lower()
#CONNECT TO DATABASE
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM users WHERE [email] = ?"(invitee,))
inviteeData = cursor.fetchone()
inviteeData = list(inviteeData)
blackList = json.loads(inviteeData[7])
for blocked in blackList:
if blocked["email"] == inviter:
connection.close()
return jsonify({
"Message": "Sorry, your invitation has been automatically rejected."
})
whiteList = json.loads(inviteeData[8])
for autoAccept in whiteList:
if autoAccept["email"] == inviter:
#Add group to invitee list
groupList = json.loads(inviteeData[3])
groupList.append(groupName)
groupList = json.dumps(groupList)
inviteeData[3] = groupList
cursor.execute("DELETE * FROM users WHERE [email] = ?", (invitee,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(inviteeData))
connection.commit()
#Add invitee to group member list
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?",(groupName,))
groupData = list(cursor.fetchone())
memberData = json.loads(groupData[5])
memberData.append({
"member": invitee,
"warnings": 0,
"praises": 0,
"kicks": 0,
"taskscompleted":0
})
memberData = json.dumps(memberData)
groupData[5] = memberData
cursor.execute("DELETE * FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
connection.close()
return jsonify({
"Message": "Your invitation has been automatically accepted!"
})
invitations = json.loads(inviteeData[6])
invitations.append({
"inviterFullName": inviterFullname,
"inviterEmail" :inviter,
"groupName": groupName
})
invitations = json.dumps(invitations)
cursor.execute("DELETE * FROM users WHERE [email] = ?", (invitee,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(inviteeData))
connection.commit()
connection.close()
return jsonify({
"Message": "Your invitation has been sent!"
})
@app.route('/handleGroupInvite', methods = ["POST"])
def handleGroupInvite():
#GET JSON DATA
jsonData = request.json
inviter = jsonData["inviterEmail"]
inviterFullname = jsonData["inviterFullName"]
groupName = jsonData["groupName"]
invitee = jsonData["inviteeEmail"]
message = jsonData["message"]
response = jsonData["response"]
#SQLITE CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM users WHERE [email] = ?"(invitee,))
#If they accept the invitation
if response.lower() == "accepted":
#Add the invitee to the group list
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?",(groupName,))
groupData= list(cursor.fetchone())
memberData = json.loads(groupData[5])
memberData.append({
"member": invitee,
"warnings": 0,
"praises": 0,
"kicks": 0,
"taskscompleted":0
})
memberData = json.dumps(memberData)
groupData[5] = memberList
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
#Add the group to the invitee's group list
cursor.execute("SELECT * FROM users where [email] = ?",(invitee,))
inviteeData = list(cursor.fetchone())
groupList = json.loads(inviteeData[3])
groupList.append(groupName)
groupList = json.dumps(groupList)
inviteeData[3] =groupList
cursor.execute("DELETE FROM users WHERE [email] = ?",(invitee,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(inviteeData))
connection.commit()
#Notify the inviter that they have accepted the invitation
cursor.execute("SELECT * FROM users where [email] = ?",(inviter,))
inviterData = list(cursor.fetchone())
inboxList = json.loads(inviteeData[10])
inboxList.append({
"sender": inviter,
"message": "Your invitation has been accepted by {}.".format(invitee)
})
inboxList = json.dumps(inboxList)
inviterData[10] =inboxList
cursor.execute("DELETE FROM users WHERE [email] = ?",(inviter,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(inviterData))
connection.commit()
connection.close()
return (jsonify({
"message": "You've been added to the group {} and your response has been sent to your inviter.".format(groupName)
}))
elif response.lower() == "declined":
#Notify the inviter that their invitation has been declined
cursor.execute("SELECT * FROM users where [email] = ?",(inviter,))
inviterData = list(cursor.fetchone())
inboxList = json.loads(inviteeData[10])
inboxList.append({
"sender": inviter,
"message": message
})
inboxList = json.dumps(inboxList)
inviterData[10] =inboxList
cursor.execute("DELETE FROM users WHERE [email] = ?",(inviter,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(inviterData))
connection.commit()
connection.close()
return (jsonify({
"message": "You have declined your invitation to the group {} and your response has been sent to your inviter.".format(groupName)
}))
### CREATE POLLS SECTION ###
# CREATE MEETUP/CLOSE POLL #~Helper
def createMeetCloseHelper(pollType):
jsonData =request.json
#GET DATA FROM FRONT END#
groupName = jsonData["groupName"]
pollData = {}
pollData["pollCreator"] = jsonData["creatorFullName"]
pollData["pollTitle"] = jsonData["pollTitle"]
pollData["pollPrompt"] = jsonData["pollPrompt"]
pollData["pollType"] = pollType
pollData["uuid"] = str(uuid.uuid4())
pollData["pollStatus"] = "ACTIVE"
pollVoteOptions = {}
for option in jsonData["pollVoteOptions"]:
pollVoteOptions[option] = 0
pollData["pollVoteOptions"] = pollVoteOptions
pollData["voters"] = []
pollData["result"] = None
#
#SQL CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?"(groupName,))
groupData = list(cursor.fetchone())
groupPolls = json.loads(groupName[4])
groupPolls.append(pollData)
groupPolls = json.dumps(groupPolls)
groupData[4] = groupPolls
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
connection.close()
@app.route('/createMeetupPoll', methods = ["POST"])
def createMeetupPoll():
createMeetCloseHelper(pollType="MEETUP")
return (jsonify({
"Message": "Your Meetup poll has been created."
}))
@app.route('/createCloseGroupPoll', methods = ["POST"])
def createCloseGroupPoll():
createMeetCloseHelper(pollType="CLOSE")
return (jsonify({
"Message": "Your Close Group poll has been created."
}))
# CREATE WARNPRAISEKICK POLL #~Helper
def createWarnPraiseKickHelper(pollType):
jsonData =request.json
#GET DATA FROM FRONT END#
groupName = jsonData["groupName"]
pollData = {}
pollData["pollCreator"] = jsonData["creatorFullName"]
pollData["targetedMemberEmail"] = jsonData["targetedMemberEmail"]
pollData["targetedMemberName"] = jsonData["targetedMemberName"]
pollData["pollTitle"] = jsonData["pollTitle"]
pollData["pollPrompt"] = jsonData["pollPrompt"]
pollData["pollType"] = pollType
pollData["uuid"] = str(uuid.uuid4())
pollData["pollStatus"] = "ACTIVE"
pollVoteOptions = {}
for option in jsonData["pollVoteOptions"]:
pollVoteOptions[option] = 0
pollData["pollVoteOptions"] = pollVoteOptions
pollData["voters"] = []
pollData["result"] = None
#
#SQL CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?"(groupName,))
groupData = list(cursor.fetchone())
memberPolls = json.loads(groupName[3])
memberPolls.append(pollData)
memberPolls = json.dumps(memberPolls)
groupData[3] = memberPolls
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
connection.close()
@app.route('/createWarningPoll', methods = ["POST"])
def createWarningPoll():
createWarnPraiseKickHelper(pollType="WARNING")
return (jsonify({
"Message": "Your warning poll has been created."
}))
@app.route('/createPraisePoll', methods = ["POST"])
def createPraisePoll():
createWarnPraiseKickHelper(pollType="PRAISE")
return (jsonify{
"Message": "Your Praise poll has been created."
})
@app.route('/createKickPoll', methods = ["POST"])
def createKickPoll():
createMeetCloseHelper(pollType="KICK")
return (jsonify{
"Message": "Your Kick poll has been created."
})
### END CREATE POLLS SECTION ###
### ISSUE VOTES SECTION ###
@app.route('/issueMeetupVote', methods = ["POST"])
def issueMeetupVote():
#GET JSON DATA
jsonData = request.json
pollResponse = jsonData["pollResponse"] #Option they selected
pollResponder = jsonData["email"]
pollUUID = jsonData["pollUUID"]
groupName = jsonData["groupName"]
#
#SQL CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#REGISTER VOTE
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?"(groupName,))
groupData = list(cursor.fetchone())
groupPolls = json.loads(groupData[4])
for index,poll in enumerate(groupPolls):
if poll["uuid"] == pollUUID:
poll["voters"].append(pollResponder)
pollVoteOptions = poll["pollVoteOptions"]
pollVoteOptions[pollResponse] += 1
poll["pollVoteOptions"] = pollVoteOptions
groupPolls[index] = poll
break
groupPolls = json.dumps(groupPolls)
groupData[4] = groupPolls
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
#COUNT TOTAL VOTES
groupPolls = json.loads(groupPolls)
sumVotes = 0
for index,poll in enumerate(groupPolls):
if poll["uuid"] == pollUUID:
pollVoteOptions = poll["pollVoteOptions"]
for option,voteCount in pollVoteOptions.items():
sumVotes += voteCount
break
totalMembers = len(groupData[5])
maxResponseCount = 0
answer = None
#IF TOTAL VOTES == TOTAL MEMBERS, CLOSE POLL
if sumVotes == totalMembers:
for index,poll in enumerate(groupPolls):
if poll["uuid"] == pollUUID:
pollVoteOptions = poll["pollVoteOptions"]
for option,voteCount in pollVoteOptions.items():
if voteCount > maxResponseCount:
maxResponseCount = voteCount
answer = option
poll["result"] = answer
poll["pollStatus"] = "CLOSED"
groupPolls[index] = poll
break
groupPolls = json.dumps(groupPolls)
groupData[4] = groupPolls
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
connection.close()
return (jsonify({
"Message": "Your meetup vote has been submitted."
}))
@app.route('/issueCloseGroupVote', methods = ["POST"])
def issueCloseGroupVote():
#GET JSON DATA
jsonData = request.json
pollResponse = jsonData["pollResponse"] #Option they selected
pollResponder = jsonData["email"]
pollUUID = jsonData["pollUUID"]
groupName = jsonData["groupName"]
#
#SQL CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#REGISTER VOTE
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?"(groupName,))
groupData = list(cursor.fetchone())
groupPolls = json.loads(groupData[4])
for index,poll in enumerate(groupPolls):
if poll["uuid"] == pollUUID:
poll["voters"].append(pollResponder)
pollVoteOptions = poll["pollVoteOptions"]
pollVoteOptions[pollResponse] += 1
poll["pollVoteOptions"] = pollVoteOptions
groupPolls[index] = poll
break
groupPolls = json.dumps(groupPolls)
groupData[4] = groupPolls
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
#COUNT TOTAL VOTES
groupPolls = json.loads(groupPolls)
sumVotes = 0
for index,poll in enumerate(groupPolls):
if poll["uuid"] == pollUUID:
pollVoteOptions = poll["pollVoteOptions"]
for option,voteCount in pollVoteOptions.items():
sumVotes += voteCount
break
totalMembers = len(groupData[5])
maxResponseCount = 0
answer = None
#IF TOTAL VOTES == TOTAL MEMBERS, CLOSE POLL
if sumVotes == totalMembers:
for index,poll in enumerate(groupPolls):
if poll["uuid"] == pollUUID:
pollVoteOptions = poll["pollVoteOptions"]
for option,voteCount in pollVoteOptions.items():
if voteCount > maxResponseCount:
maxResponseCount = voteCount
answer = option
if maxResponseCount == totalMembers:
if answer.lower() == "yes":
poll["result"] = answer
poll["pollStatus"] = "CLOSED"
groupPolls[index] = poll
#NOTIFY SUPER USER THAT GROUP MUST BE CLOSED
reportMessage = "Members have voted to close this group."
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
cursor.execute("INSERT INTO moderationRequests (subject,message,type,status,number) VALUES(?,?,?,?,?)",(groupName,reportMessage,"CLOSE","OPEN",None))
connection.commit()
else:
poll["result"] = answer
poll["pollStatus"] = "CLOSED"
groupPolls[index] = poll
break
groupPolls = json.dumps(groupPolls)
groupData[4] = groupPolls
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
connection.close()
return (jsonify({
"Message": "Your meetup vote has been submitted."
}))
# REGISTER MEMBER POLL VOTE #~HELPER
def registerVote(cursor,groupName,connection,pollUUID):
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?"(groupName,))
groupData = list(cursor.fetchone())
memberPolls = json.loads(groupData[3])
for index,poll in enumerate(memberPolls):
if poll["uuid"] == pollUUID:
poll["voters"].append(pollResponder)
pollVoteOptions = poll["pollVoteOptions"]
pollVoteOptions[pollResponse] += 1
poll["pollVoteOptions"] = pollVoteOptions
memberPolls[index] = poll
break
memberPolls = json.dumps(memberPolls)
groupData[3] = memberPolls
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
# HANDLE POLL CLOSURE #~HELPER
def handleWarningPraiseKickVote(cursor,groupName,pollType,connection,pollUUID,pollTargetedMemberEmail):
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?"(groupName,))
groupData = list(cursor.fetchone())
memberPolls = json.loads(groupData[3])
sumVotes = 0 #Count of the total sum of votes
totalMembers = len(groupData[5]) # Cross checks to see if all votes have been registered
maxResponseCount = 0 # Checks to see if it's actually unanimous
answer = None #Answer field
for index,poll in enumerate(memberPolls):
if poll["uuid"] == pollUUID:
pollVoteOptions = poll["pollVoteOptions"]
for option,voteCount in pollVoteOptions.items():
sumVotes += voteCount
if voteCount > maxResponseCount:
maxResponseCount = voteCount
answer = option
break
if sumVotes == (totalMembers -1) == maxResponseCount: #We have all votes, and they were unanimous
for index,poll in enumerate(memberPolls):
if poll["uuid"] == pollUUID:
poll["result"] = answer
poll["pollStatus"] = "CLOSED"
memberPolls[index] = poll
break
groupData[3] = json.dumps(memberPolls) #update member polls
if answer.lower() == "yes":
memberList = json.loads(groupData[5])
for member in memberList:
if member["member"] == pollTargetedMemberEmail:
member[pollType] += 1
groupData[5] = json.dumps(memberList)
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
elif sumVotes == (totalMembers - 1): #We have all votes, and they were not unanimous
for index,poll in enumerate(memberPolls):
if poll["uuid"] == pollUUID:
poll["result"] = "Not unanimous"
poll["pollStatus"] = "CLOSED"
memberPolls[index] = poll
break
groupData[3] = json.dumps(memberPolls) #update member polls
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
@app.route('/issueWarningVote', methods = ["POST"])
def issueWarningVote():
#GET DATA FROM FRONT END
jsonData = request.json
pollResponse = jsonData["pollResponse"] #Option they selected
pollResponder = jsonData["voterEmail"]
pollUUID = jsonData["pollUUID"]
pollTargetedMemberEmail = jsonData["targetedMemberEmail"]
groupName = jsonData["groupName"]
#
#SQL CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#REGISTER VOTE INTO POLL
registerVote(cursor = cursor, groupName= groupName, connection= connection,pollUUID=pollUUID)
#
#CHECK IF POLL IS COMPLETE - if so, handle the unanimous/non-unanimous outcomes
handleWarningPraiseClosure(cursor = cursor,groupName= groupName,pollType = "warnings",connection = connection,pollUUID=pollUUID,pollTargetedMemberEmail=pollTargetedMemberEmail)
#Check the warning count for members and kick out if necessary
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?"(groupName,))
groupData = list(cursor.fetchone())
memberList = json.loads(groupData[5])
adjustMember = False
memberIndex = None
for index,member in enumerate(memberList):
if member["member"] == pollTargetedMemberEmail:
if member["warnings"] >= 3: #User needs to be kicked out and points deducted
memberIndex = index
adjustMember = True
break
if adjustMember:
del memberList[memberIndex]
groupData[5] = json.dumps(memberList) #update member warning
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
#
#Adjust the user's points and notify them about being kicked out
cursor.execute("SELECT * FROM users WHERE [email] = ?"(pollTargetedMemberEmail,))
userData = cursor.fetchone()
userData = list(inviteeData)
if adjustMember:
#Deduct points
userData[4] -= 5
#Remove from group
groupList = json.loads(userData[3])
groupList.remove(groupName)
userData[3] = json.dumps(groupList)
#Notify member
inboxList = json.loads(userData[10])
inboxList.append({
"sender": groupName,
"message": "You've received 3 warnings from {} and incurred a 5 point deduction.".format(groupName)
})
userData[10] = json.dumps(inboxList)
cursor.execute("DELETE * FROM users WHERE [email] = ?", (pollTargetedMemberEmail,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(userData))
connection.commit()
#Close Database connection and notify use that their vote has been registered
connection.close()
return (jsonify({
"Message": "Your vote has been submitted."
}))
@app.route('/issuePraiseVote', methods = ["POST"])
def issuePraiseVote():
#GET DATA FROM FRONT END
jsonData = request.json
pollResponse = jsonData["pollResponse"] #Option they selected
pollResponder = jsonData["voterEmail"]
pollUUID = jsonData["pollUUID"]
pollTargetedMemberEmail = jsonData["targetedMemberEmail"]
groupName = jsonData["groupName"]
#
#SQL CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#REGISTER VOTE INTO POLL
registerVote(cursor = cursor, groupName= groupName, connection= connection,pollUUID=pollUUID)
#
#CHECK IF POLL IS COMPLETE - if so, handle the unanimous/non-unanimous outcomes
handleWarningPraiseClosure(cursor = cursor,groupName= groupName,pollType = "praises",connection = connection,pollUUID=pollUUID,pollTargetedMemberEmail=pollTargetedMemberEmail)
#Check the warning count for members and kick out if necessary
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?"(groupName,))
groupData = list(cursor.fetchone())
memberList = json.loads(groupData[5])
adjustMember = False
memberIndex = None
for index,member in enumerate(memberList):
if member["member"] == pollTargetedMemberEmail:
if member["praises"] >= 3: #User needs to be kicked out and points deducted
memberIndex = index
adjustMember = True
break
if adjustMember:
memberList[memberIndex]["praises"] = 0
groupData[5] = json.dumps(memberList) #update member praise
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
#
#Adjust the user's points and notify them that they've received a praise
cursor.execute("SELECT * FROM users WHERE [email] = ?"(pollTargetedMemberEmail,))
userData = cursor.fetchone()
userData = list(inviteeData)
if adjustMember:
#Deduct points
userData[4] += 5
#Notify member
inboxList = json.loads(userData[10])
inboxList.append({
"sender": groupName,
"message": "You've received 3 praises from {} and was granted a 5 point increase! Congrats! Keep up the great work!".format(groupName)
})
userData[10] = json.dumps(inboxList)
cursor.execute("DELETE * FROM users WHERE [email] = ?", (pollTargetedMemberEmail,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(userData))
connection.commit()
#Close Database connection and notify use that their vote has been registered
connection.close()
managePointStatus(pollTargetedMemberEmail)
return (jsonify({
"Message": "Your vote has been submitted."
}))
@app.route('/issueKickVote', methods = ["POST"])
def issueKickVote():
#GET DATA FROM FRONT END
jsonData = request.json
pollResponse = jsonData["pollResponse"] #Option they selected
pollResponder = jsonData["voterEmail"]
pollUUID = jsonData["pollUUID"]
pollTargetedMemberEmail = jsonData["targetedMemberEmail"]
groupName = jsonData["groupName"]
#
#SQL CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#REGISTER VOTE INTO POLL
registerVote(cursor = cursor, groupName= groupName, connection= connection,pollUUID=pollUUID)
#
#CHECK IF POLL IS COMPLETE - if so, handle the unanimous/non-unanimous outcomes
handleWarningPraiseClosure(cursor = cursor,groupName= groupName,pollType = "kicks",connection = connection,pollUUID=pollUUID,pollTargetedMemberEmail=pollTargetedMemberEmail)
#Check the kick count and kick out if necessary
cursor.execute("SELECT * FROM groups WHERE [groupName] = ?"(groupName,))
groupData = list(cursor.fetchone())
memberList = json.loads(groupData[5])
adjustMember = False
memberIndex = None
for index,member in enumerate(memberList):
if member["member"] == pollTargetedMemberEmail:
if member["kicks"] >= 1: #User needs to be kicked out and points deducted
memberIndex = index
adjustMember = True
break
if adjustMember:
del memberList[memberIndex]
groupData[5] = json.dumps(memberList) #update member warning
cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,polls,members) VALUES(?,?,?,?,?)",tuple(groupData))
connection.commit()
#
#Adjust the user's points and notify them about being kicked out
cursor.execute("SELECT * FROM users WHERE [email] = ?"(pollTargetedMemberEmail,))
userData = cursor.fetchone()
userData = list(inviteeData)
if adjustMember:
#Deduct points
userData[4] -= 10
#Remove from group
groupList = json.loads(userData[3])
groupList.remove(groupName)
userData[3] = json.dumps(groupList)
#Notify member
inboxList = json.loads(userData[10])
inboxList.append({
"sender": groupName,
"message": "You have been kicked from {} and incurred a 10 point deduction.".format(groupName)
})
userData[10] = json.dumps(inboxList)
cursor.execute("DELETE * FROM users WHERE [email] = ?", (pollTargetedMemberEmail,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(userData))
connection.commit()
#Close Database connection and notify use that their vote has been registered
connection.close()
managePointStatus(pollTargetedMemberEmail)
return (jsonify({
"Message": "Your vote has been submitted."
}))
@app.route('/issueComplimentVote', methods = ["POST"])
def issueCompliment():
#GET DATA FROM FRONT END
jsonData = request.json
complimentReceiverEmail = jsonData["complimentReceiverEmail"]
#
#SQL CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#Increase Compliments/Score
cursor.execute("SELECT * FROM users WHERE [email] = ?"(complimentReceiverEmail,))
userData = cursor.fetchone()
userData = list(inviteeData)
userData[9] += 1
if userData[9] >= 3:
userData[9] = 0
userData[4] += 5
inboxList = json.loads(userData[10])
inboxList.append({
"sender": groupName,
"message": "You've received 3 compliments and a 5 point increase!".format(groupName)
})
userData[10] = json.dumps(inboxList)
cursor.execute("DELETE * FROM users WHERE [email] = ?", (complimentReceiverEmail,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(userData))
connection.commit()
connection.close()
managePointStatus(complimentReceiverEmail)
#Return
return (jsonify({
"Message": "Your compliment has been sent!"
}))
###END ISSUE VOTES SECTION###
### ADD TO WHITEBOX/BLACKBOX SECTION ###
# ADD TO AUTOBOX #~HELPER
def addtoAutoBox(cursor,connection,userEmail,emailAddition,index):
#Add user to autoBox
cursor.execute("SELECT * FROM users WHERE [email] = ?"(userEmail,))
userData = cursor.fetchone()
userData = list(userData)
autoBox = userData[index]
autoBox = json.loads(autoBox)
if emailAddition not in autoBox:
autoBox.append(emailAddition)
autoBox = json.dumps(autoBox)
userData[index] = autoBox
cursor.execute("DELETE * FROM users WHERE [email] = ?", (userEmail,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",tuple(userData))
connection.commit()
@app.route('/addToWhiteBox', methods = ["POST"])
def addToWhiteBox():
#GET DATA FROM FRONT END
jsonData = request.json
emailAddition = jsonData["emailAddition"]
userEmail = jsonData["userEmail"]
#
#SQL CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#Add user to autoBox
addtoAutoBox(cursor = cursor, connection = connection,userEmail=userEmail ,emailAddition=emailAddition,index = 8)
connection.close()
#Return
return (jsonify({
"Message": "The user has been registered to your whitebox.".format(emailAddition)
}))
@app.route('/addToBlackBox', methods = ["POST"])
def addToBlackBox():
#GET DATA FROM FRONT END
jsonData = request.json
emailAddition = jsonData["emailAddition"]
userEmail = jsonData["userEmail"]
#
#SQL CONNECTION
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#Add user to autoBox
addtoAutoBox(cursor = cursor, connection = connection,userEmail=userEmail ,emailAddition=emailAddition,index = 7)
connection.close()
#Return
return (jsonify({
"Message": "{} has been registered to your blackbox.".format(emailAddition)
}))
### END ADD TO WHITEBOX/BLACKBOX SECTION ###
| 39.460526
| 222
| 0.641714
| 3,495
| 35,988
| 6.606867
| 0.085837
| 0.042787
| 0.021394
| 0.025898
| 0.79191
| 0.772206
| 0.752847
| 0.737517
| 0.731974
| 0.718808
| 0
| 0.006063
| 0.230104
| 35,988
| 912
| 223
| 39.460526
| 0.827336
| 0.083028
| 0
| 0.755043
| 0
| 0.017291
| 0.257309
| 0.067883
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.017291
| 0.005764
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e88c05729e7f05dd2472b9b99aa9fe8260618229
| 155
|
py
|
Python
|
rfcutils/__init__.py
|
RFChallenge/rfchallenge_starter
|
724a52f68541d3f9c3f460d88fe4e2be9662aa49
|
[
"MIT"
] | 6
|
2021-08-20T06:01:04.000Z
|
2022-01-05T14:24:32.000Z
|
rfcutils/__init__.py
|
RFChallenge/rfchallenge_starter
|
724a52f68541d3f9c3f460d88fe4e2be9662aa49
|
[
"MIT"
] | 1
|
2021-08-19T16:32:28.000Z
|
2021-08-19T16:32:28.000Z
|
rfcutils/__init__.py
|
RFChallenge/rfchallenge_starter
|
724a52f68541d3f9c3f460d88fe4e2be9662aa49
|
[
"MIT"
] | null | null | null |
from .dataset_helper_fn import *
from .qpsk_helper_fn import *
from .sigmf_helper_fn import *
from .mixture_helper_fn import *
from .eval_utils_fn import *
| 31
| 32
| 0.812903
| 25
| 155
| 4.64
| 0.4
| 0.344828
| 0.482759
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122581
| 155
| 5
| 33
| 31
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
e88e02cead250e93f2ed5ea50e1b6e7005673317
| 83
|
py
|
Python
|
pytracking/tracker/dimp_simple/__init__.py
|
sehomi/pyCFTrackers
|
4dbd550fbac78f4e7e35fdb4a1761b5b0cf9b096
|
[
"MIT"
] | null | null | null |
pytracking/tracker/dimp_simple/__init__.py
|
sehomi/pyCFTrackers
|
4dbd550fbac78f4e7e35fdb4a1761b5b0cf9b096
|
[
"MIT"
] | null | null | null |
pytracking/tracker/dimp_simple/__init__.py
|
sehomi/pyCFTrackers
|
4dbd550fbac78f4e7e35fdb4a1761b5b0cf9b096
|
[
"MIT"
] | null | null | null |
from .dimp_simple import DiMPSimple
def get_tracker_class():
return DiMPSimple
| 20.75
| 35
| 0.807229
| 11
| 83
| 5.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144578
| 83
| 4
| 36
| 20.75
| 0.901408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
e8ce533206148db3b6abc48a30da83230cf6fa5f
| 62
|
py
|
Python
|
src/core/utils/__init__.py
|
firewut/data-transform-pipelines-api
|
c62a7aa5fd57102fa67cf715dc78c3365b739925
|
[
"MIT"
] | 2
|
2019-01-09T07:42:17.000Z
|
2021-08-25T02:43:47.000Z
|
src/core/utils/__init__.py
|
firewut/data-transform-pipelines-api
|
c62a7aa5fd57102fa67cf715dc78c3365b739925
|
[
"MIT"
] | null | null | null |
src/core/utils/__init__.py
|
firewut/data-transform-pipelines-api
|
c62a7aa5fd57102fa67cf715dc78c3365b739925
|
[
"MIT"
] | null | null | null |
from core.utils.dict import *
from core.utils.random import *
| 20.666667
| 31
| 0.774194
| 10
| 62
| 4.8
| 0.6
| 0.333333
| 0.541667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 62
| 2
| 32
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2cd7816a8419502b8921acd3cab29ac7134be118
| 1,654
|
py
|
Python
|
tests/test_open.py
|
K0IN/pastebin-as-file
|
7968266f993a9976f1fd9af66d356bf72fa1393a
|
[
"MIT"
] | null | null | null |
tests/test_open.py
|
K0IN/pastebin-as-file
|
7968266f993a9976f1fd9af66d356bf72fa1393a
|
[
"MIT"
] | null | null | null |
tests/test_open.py
|
K0IN/pastebin-as-file
|
7968266f993a9976f1fd9af66d356bf72fa1393a
|
[
"MIT"
] | null | null | null |
import requests_mock
import pastebinfs.sync
import pytest
def test_open_without_openmode(requests_mock: requests_mock.Mocker):
with pytest.raises(ValueError, match='must have exactly one of create/read/write/append mode'):
pastebinfs.sync.pastebin_open("a.txt", "", "api_key", "username", "password")
def test_open_with_incompatible_openmode(requests_mock: requests_mock.Mocker):
with pytest.raises(ValueError, match='must have exactly one of create/read/write/append mode'):
pastebinfs.sync.pastebin_open("a.txt", "rw", "api_key", "username", "password")
with pytest.raises(ValueError, match='must have exactly one of create/read/write/append mode'):
pastebinfs.sync.pastebin_open("a.txt", "ra", "api_key", "username", "password")
with pytest.raises(ValueError, match='must have exactly one of create/read/write/append mode'):
pastebinfs.sync.pastebin_open("a.txt", "wa", "api_key", "username", "password")
with pytest.raises(ValueError, match='must have exactly one of create/read/write/append mode'):
pastebinfs.sync.pastebin_open("a.txt", "rwa", "api_key", "username", "password")
with pytest.raises(ValueError, match='must have exactly one of create/read/write/append mode'):
pastebinfs.sync.pastebin_open("a.txt", "rwa+", "api_key", "username", "password")
with pytest.raises(ValueError, match=r'open mode must be either \(t\)ext or \(b\)inary'):
pastebinfs.sync.pastebin_open("a.txt", "rtb", "api_key", "username", "password")
def test_open_existing_file(requests_mock: requests_mock.Mocker):
pass# if flag is x or r then the file must exist
| 53.354839
| 99
| 0.719468
| 233
| 1,654
| 4.974249
| 0.244635
| 0.096635
| 0.096635
| 0.157032
| 0.849871
| 0.823986
| 0.798102
| 0.741156
| 0.741156
| 0.741156
| 0
| 0
| 0.139661
| 1,654
| 30
| 100
| 55.133333
| 0.814476
| 0.025393
| 0
| 0.285714
| 0
| 0
| 0.362112
| 0.089441
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.380952
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
2cebbd4894b644e58b5398d398f8e72a97fb68b8
| 135
|
py
|
Python
|
user_profile/admin.py
|
LD31D/django_blog
|
df4f336fa9d58aff87abb32c0a9f7791b8fc0eeb
|
[
"MIT"
] | null | null | null |
user_profile/admin.py
|
LD31D/django_blog
|
df4f336fa9d58aff87abb32c0a9f7791b8fc0eeb
|
[
"MIT"
] | 1
|
2020-12-04T06:59:00.000Z
|
2020-12-04T20:17:58.000Z
|
user_profile/admin.py
|
LD31D/django_blog
|
df4f336fa9d58aff87abb32c0a9f7791b8fc0eeb
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
@admin.register(UserProfile)
class UserProfileAdmin(admin.ModelAdmin):
pass
| 15
| 41
| 0.8
| 16
| 135
| 6.75
| 0.75
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118519
| 135
| 8
| 42
| 16.875
| 0.907563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
fa1346de2006fcdbc7f15c1ff28e1cd87e468231
| 81,778
|
py
|
Python
|
models/py_utils/kp.py
|
Yunnglin/Chart-to-text
|
86f3291930289a4739f658c590e208771759ee50
|
[
"BSD-3-Clause"
] | 30
|
2021-03-03T02:16:30.000Z
|
2022-02-23T10:46:36.000Z
|
models/py_utils/kp.py
|
Yunnglin/Chart-to-text
|
86f3291930289a4739f658c590e208771759ee50
|
[
"BSD-3-Clause"
] | 16
|
2021-03-30T07:50:03.000Z
|
2022-03-03T04:56:30.000Z
|
models/py_utils/kp.py
|
Yunnglin/Chart-to-text
|
86f3291930289a4739f658c590e208771759ee50
|
[
"BSD-3-Clause"
] | 15
|
2021-03-03T06:21:19.000Z
|
2022-02-25T10:01:36.000Z
|
import numpy as np
import torch
import torch.nn as nn
from .utils import convolution, residual
from .utils import make_layer, make_layer_revr, cls, offset, line_cls
import time
from .kp_utils import _tranpose_and_gather_feat, _decode, _decode_pure, _decode_gt, _decode_line_cls, _decode_pure_cls, _decode_pure_line
from .kp_utils import _sigmoid, _ae_loss, _regr_loss, _neg_loss, _ae_line_loss, _offset_loss
from .kp_utils import make_tl_layer, make_br_layer, make_kp_layer, make_center_layer
from .kp_utils import make_pool_layer, make_unpool_layer
from .kp_utils import make_merge_layer, make_inter_layer, make_cnv_layer
class kp_module(nn.Module):
def __init__(
self, n, dims, modules, layer=residual,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, **kwargs
):
super(kp_module, self).__init__()
self.n = n
curr_mod = modules[0]
next_mod = modules[1]
curr_dim = dims[0]
next_dim = dims[1]
self.up1 = make_up_layer(
3, curr_dim, curr_dim, curr_mod,
layer=layer, **kwargs
)
self.max1 = make_pool_layer(curr_dim)
self.low1 = make_hg_layer(
3, curr_dim, next_dim, curr_mod,
layer=layer, **kwargs
)
self.low2 = kp_module(
n - 1, dims[1:], modules[1:], layer=layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer,
**kwargs
) if self.n > 1 else \
make_low_layer(
3, next_dim, next_dim, next_mod,
layer=layer, **kwargs
)
self.low3 = make_hg_layer_revr(
3, next_dim, curr_dim, curr_mod,
layer=layer, **kwargs
)
self.up2 = make_unpool_layer(curr_dim)
self.merge = make_merge_layer(curr_dim)
def forward(self, x):
up1 = self.up1(x)
max1 = self.max1(x)
low1 = self.low1(max1)
low2 = self.low2(low1)
low3 = self.low3(low2)
up2 = self.up2(low3)
return self.merge(up1, up2)
class kp(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual
):
super(kp, self).__init__()
print("use kp")
self.nstack = nstack
self._decode = _decode
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.tl_cnvs = nn.ModuleList([
make_tl_layer(cnv_dim) for _ in range(nstack)
])
self.br_cnvs = nn.ModuleList([
make_br_layer(cnv_dim) for _ in range(nstack)
])
## keypoint heatmaps
self.tl_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
self.br_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
## tags
self.tl_tags = nn.ModuleList([
make_tag_layer(cnv_dim, curr_dim, 1) for _ in range(nstack)
])
self.br_tags = nn.ModuleList([
make_tag_layer(cnv_dim, curr_dim, 1) for _ in range(nstack)
])
for tl_heat, br_heat in zip(self.tl_heats, self.br_heats):
tl_heat[-1].bias.data.fill_(-2.19)
br_heat[-1].bias.data.fill_(-2.19)
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.tl_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.br_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
def _train(self, *xs):
image = xs[0]
tl_inds = xs[1]
br_inds = xs[2]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_tags, self.br_tags,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_tag_, br_tag_ = layer[6:8]
tl_regr_, br_regr_ = layer[8:10]
kp = kp_(inter)
cnv = cnv_(kp)
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_tag, br_tag = tl_tag_(tl_cnv), br_tag_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)
br_tag = _tranpose_and_gather_feat(br_tag, br_inds)
tl_regr = _tranpose_and_gather_feat(tl_regr, tl_inds)
br_regr = _tranpose_and_gather_feat(br_regr, br_inds)
outs += [tl_heat, br_heat, tl_tag, br_tag, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
image = xs[0]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_tags, self.br_tags,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_tag_, br_tag_ = layer[6:8]
tl_regr_, br_regr_ = layer[8:10]
kp = kp_(inter)
cnv = cnv_(kp)
if ind == self.nstack - 1:
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_tag, br_tag = tl_tag_(tl_cnv), br_tag_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
outs += [tl_heat, br_heat, tl_tag, br_tag, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-6:], **kwargs)
def forward(self, *xs, **kwargs):
if len(xs) > 1:
return self._train(*xs, **kwargs)
return self._test(*xs, **kwargs)
class kp_cls_pure(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual
):
super(kp_cls_pure, self).__init__()
print("use kp")
self.nstack = nstack
self._decode = _decode_pure_cls
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.tl_cnvs = nn.ModuleList([
make_tl_layer(cnv_dim) for _ in range(nstack)
])
self.br_cnvs = nn.ModuleList([
make_br_layer(cnv_dim) for _ in range(nstack)
])
## keypoint heatmaps
self.tl_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
self.br_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
for tl_heat, br_heat in zip(self.tl_heats, self.br_heats):
tl_heat[-1].bias.data.fill_(-2.19)
br_heat[-1].bias.data.fill_(-2.19)
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.tl_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.br_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
self.cls = cls(2, cnv_dim, cnv_dim, 3, stride=2)
self.offset = offset(2, cnv_dim, cnv_dim, 1, stride=2)
def _train(self, *xs):
image = xs[0]
tl_inds = xs[1]
br_inds = xs[2]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs,
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_regr_, br_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
tl_regr = _tranpose_and_gather_feat(tl_regr, tl_inds)
br_regr = _tranpose_and_gather_feat(br_regr, br_inds)
outs += [tl_heat, br_heat, tl_regr, br_regr]
if ind == self.nstack - 1:
cls_p = self.cls(cnv)
offset_p = self.offset(cnv)
outs += [cls_p, offset_p]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
image = xs[0]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs,
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_regr_, br_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
if ind == self.nstack - 1:
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
cls_p = self.cls(cnv)
offset_p = self.offset(cnv)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
outs += [tl_heat, br_heat, tl_regr, br_regr]
outs += [cls_p, offset_p]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-6:], **kwargs), 0, 0
def forward(self, *xs, **kwargs):
if len(xs) > 1:
return self._train(*xs, **kwargs)
return self._test(*xs, **kwargs)
class kp_gt(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual
):
super(kp_gt, self).__init__()
print("use kp")
self.nstack = nstack
self._decode = _decode_gt
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.tl_cnvs = nn.ModuleList([
make_tl_layer(cnv_dim) for _ in range(nstack)
])
self.br_cnvs = nn.ModuleList([
make_br_layer(cnv_dim) for _ in range(nstack)
])
## keypoint heatmaps
self.tl_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
self.br_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
## tags
self.tl_tags = nn.ModuleList([
make_tag_layer(cnv_dim, curr_dim, 1) for _ in range(nstack)
])
self.br_tags = nn.ModuleList([
make_tag_layer(cnv_dim, curr_dim, 1) for _ in range(nstack)
])
for tl_heat, br_heat in zip(self.tl_heats, self.br_heats):
tl_heat[-1].bias.data.fill_(-2.19)
br_heat[-1].bias.data.fill_(-2.19)
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.tl_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.br_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
def _train(self, *xs):
image = xs[0]
tl_inds = xs[1]
br_inds = xs[2]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_tags, self.br_tags,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_tag_, br_tag_ = layer[6:8]
tl_regr_, br_regr_ = layer[8:10]
kp = kp_(inter)
cnv = cnv_(kp)
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_tag, br_tag = tl_tag_(tl_cnv), br_tag_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)
br_tag = _tranpose_and_gather_feat(br_tag, br_inds)
tl_regr = _tranpose_and_gather_feat(tl_regr, tl_inds)
br_regr = _tranpose_and_gather_feat(br_regr, br_inds)
outs += [tl_heat, br_heat, tl_tag, br_tag, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
image = xs[0]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_tags, self.br_tags,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_tag_, br_tag_ = layer[6:8]
tl_regr_, br_regr_ = layer[8:10]
kp = kp_(inter)
cnv = cnv_(kp)
if ind == self.nstack - 1:
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_tag, br_tag = tl_tag_(tl_cnv), br_tag_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
outs += [tl_heat, br_heat, tl_tag, br_tag, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-6:], **kwargs)
def forward(self, *xs, **kwargs):
if len(xs) > 1:
return self._train(*xs, **kwargs)
return self._test(*xs, **kwargs)
class kp_pure(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual, if_dcn=False
):
super(kp_pure, self).__init__()
print("use kp pure")
self.nstack = nstack
self._decode = _decode_pure
self.if_dcn = if_dcn
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.tl_cnvs = nn.ModuleList([
make_tl_layer(cnv_dim) for _ in range(nstack)
])
self.br_cnvs = nn.ModuleList([
make_br_layer(cnv_dim) for _ in range(nstack)
])
## keypoint heatmaps
self.tl_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
self.br_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
for tl_heat, br_heat in zip(self.tl_heats, self.br_heats):
tl_heat[-1].bias.data.fill_(-2.19)
br_heat[-1].bias.data.fill_(-2.19)
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.tl_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.br_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
def _train(self, *xs):
image = xs[0]
tl_inds = xs[1]
br_inds = xs[2]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_regr_, br_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
tl_regr = _tranpose_and_gather_feat(tl_regr, tl_inds)
br_regr = _tranpose_and_gather_feat(br_regr, br_inds)
outs += [tl_heat, br_heat, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
image = xs[0]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_regr_, br_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
if self.if_dcn:
cnv = self.dcn(cnv)
if ind == self.nstack - 1:
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
outs += [tl_heat, br_heat, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-4:], **kwargs), 0, 0
def forward(self, *xs, **kwargs):
if len(xs) > 1:
return self._train(*xs, **kwargs)
return self._test(*xs, **kwargs)
class kp_pure_line_cls(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual, if_dcn=False
):
super(kp_pure_line_cls, self).__init__()
print("use kp pure")
self.nstack = nstack
self._decode = _decode_line_cls
self.if_dcn = if_dcn
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cls = nn.ModuleList([
line_cls(cnv_dim*8, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
def _group_features(self, features, weight):
features = features.view(features.size(0), -1, 4, features.size(2))
weight = weight.view(weight.size(0), -1, 4)
weight = weight.unsqueeze(3)
weighted_features = features * weight
weighted_features = torch.sum(weighted_features, 2)
weighted_features = weighted_features.view(weighted_features.size(0), -1, 8*weighted_features.size(2))
return weighted_features
def _train(self, *xs):
image = xs[0]
ps_inds = xs[1]
ng_inds = xs[2]
ps_weight = xs[3]
ng_weight = xs[4]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs, self.cls
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
cls_ = layer[2]
kp = kp_(inter)
cnv = cnv_(kp)
ps_features = _tranpose_and_gather_feat(cnv, ps_inds)
ng_features = _tranpose_and_gather_feat(cnv, ng_inds)
ps_features_group = self._group_features(ps_features, ps_weight)
ng_features_group = self._group_features(ng_features, ng_weight)
ps_prediction = cls_(ps_features_group)
ng_prediction = cls_(ng_features_group)
outs += [ps_prediction, ng_prediction]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
image = xs[0]
ps_inds = xs[1]
ng_inds = xs[2]
ps_weight = xs[3]
ng_weight = xs[4]
ps_mask = xs[5]
ng_mask = xs[6]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs, self.cls
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
cls_ = layer[2]
kp = kp_(inter)
cnv = cnv_(kp)
if self.if_dcn:
cnv = self.dcn(cnv)
if ind == self.nstack - 1:
ps_features = _tranpose_and_gather_feat(cnv, ps_inds)
ng_features = _tranpose_and_gather_feat(cnv, ng_inds)
ps_features_group = self._group_features(ps_features, ps_weight)
ng_features_group = self._group_features(ng_features, ng_weight)
ps_prediction = cls_(ps_features_group)
ng_prediction = cls_(ng_features_group)
outs += [ps_prediction, ng_prediction]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-2:]), 0, 0
def _test_real(self, *xs, **kwargs):
image = xs[0]
inds = xs[1]
weight = xs[2]
mask = xs[3]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs, self.cls
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
cls_ = layer[2]
kp = kp_(inter)
cnv = cnv_(kp)
if self.if_dcn:
cnv = self.dcn(cnv)
if ind == self.nstack - 1:
features = _tranpose_and_gather_feat(cnv, inds)
features_group = self._group_features(features, weight)
prediction = cls_(features_group)
outs += [prediction]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
final_ans = torch.argmax(outs[-1], dim=1)
return final_ans, 0, 0
def forward(self, *xs, **kwargs):
if len(xs) == 5:
return self._train(*xs, **kwargs)
if len(xs) == 7:
return self._test(*xs, **kwargs)
if len(xs) == 4:
return self._test_real(*xs, **kwargs)
class kp_pure_bar(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual, if_dcn=False
):
super(kp_pure_bar, self).__init__()
print("use kp pure")
self.nstack = nstack
self._decode = _decode_pure
self.if_dcn = if_dcn
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.tl_cnvs = nn.ModuleList([
make_tl_layer(cnv_dim) for _ in range(nstack)
])
self.br_cnvs = nn.ModuleList([
make_br_layer(cnv_dim) for _ in range(nstack)
])
## keypoint heatmaps
self.tl_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
self.br_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
for tl_heat, br_heat in zip(self.tl_heats, self.br_heats):
tl_heat[-1].bias.data.fill_(-2.19)
br_heat[-1].bias.data.fill_(-2.19)
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.tl_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.br_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
def _train(self, *xs):
image = xs[0]
tl_inds = xs[1]
br_inds = xs[2]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_regr_, br_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
tl_regr = _tranpose_and_gather_feat(tl_regr, tl_inds)
br_regr = _tranpose_and_gather_feat(br_regr, br_inds)
outs += [tl_heat, br_heat, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
image = xs[0]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_regr_, br_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
if self.if_dcn:
cnv = self.dcn(cnv)
if ind == self.nstack - 1:
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
outs += [tl_heat, br_heat, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-4:], **kwargs), 0, 0
def forward(self, *xs, **kwargs):
if len(xs) > 1:
return self._train(*xs, **kwargs)
return self._test(*xs, **kwargs)
class kp_pure_pie(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual, if_dcn=False
):
super(kp_pure_pie, self).__init__()
print("use kp pure pie")
self.nstack = nstack
self._decode = _decode_pure
self.if_dcn = if_dcn
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.tl_cnvs = nn.ModuleList([
make_tl_layer(cnv_dim) for _ in range(nstack)
])
self.br_cnvs = nn.ModuleList([
make_br_layer(cnv_dim) for _ in range(nstack)
])
## keypoint heatmaps
self.tl_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
self.br_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
for tl_heat, br_heat in zip(self.tl_heats, self.br_heats):
tl_heat[-1].bias.data.fill_(-2.19)
br_heat[-1].bias.data.fill_(-2.19)
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.tl_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.br_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
def _train(self, *xs):
image = xs[0]
center_inds = xs[1]
key_inds_tl = xs[2]
key_inds_br = xs[3]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
center_cnv_, key_cnv_ = layer[2:4]
center_heat_, key_heat_ = layer[4:6]
center_regr_, key_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
center_cnv = center_cnv_(cnv)
key_cnv = key_cnv_(cnv)
center_heat, key_heat = center_heat_(center_cnv), key_heat_(key_cnv)
center_regr, key_regr = center_regr_(center_cnv), key_regr_(key_cnv)
center_regr = _tranpose_and_gather_feat(center_regr, center_inds)
key_regr_tl = _tranpose_and_gather_feat(key_regr, key_inds_tl)
key_regr_br = _tranpose_and_gather_feat(key_regr, key_inds_br)
outs += [center_heat, key_heat, center_regr, key_regr_tl, key_regr_br]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
image = xs[0]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
center_cnv_, key_cnv_ = layer[2:4]
center_heat_, key_heat_ = layer[4:6]
center_regr_, key_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
if ind == self.nstack - 1:
center_cnv = center_cnv_(cnv)
key_cnv = key_cnv_(cnv)
center_heat, key_heat = center_heat_(center_cnv), key_heat_(key_cnv)
center_regr, key_regr = center_regr_(center_cnv), key_regr_(key_cnv)
outs += [center_heat, key_heat, center_regr, key_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-4:], **kwargs), 0, 0
def forward(self, *xs, **kwargs):
if len(xs) > 1:
return self._train(*xs, **kwargs)
return self._test(*xs, **kwargs)
class kp_pure_pie_s(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual, if_dcn=False
):
super(kp_pure_pie_s, self).__init__()
print("use kp pure pie")
self.nstack = nstack
self._decode = _decode_pure
self.if_dcn = if_dcn
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.tl_cnvs = nn.ModuleList([
make_tl_layer(cnv_dim) for _ in range(nstack)
])
self.br_cnvs = nn.ModuleList([
make_br_layer(cnv_dim) for _ in range(nstack)
])
## keypoint heatmaps
self.tl_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
self.br_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
for tl_heat, br_heat in zip(self.tl_heats, self.br_heats):
tl_heat[-1].bias.data.fill_(-2.19)
br_heat[-1].bias.data.fill_(-2.19)
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.tl_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.br_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
def _train(self, *xs):
image = xs[0]
center_inds = xs[1]
key_inds_tl = xs[2]
key_inds_br = xs[3]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
center_heat_, key_heat_ = layer[4:6]
center_regr_, key_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
#tl_cnv = tl_cnv_(cnv)
#br_cnv = br_cnv_(cnv)
center_heat, key_heat = center_heat_(cnv), key_heat_(cnv)
center_regr, key_regr = center_regr_(cnv), key_regr_(cnv)
center_regr = _tranpose_and_gather_feat(center_regr, center_inds)
key_regr_tl = _tranpose_and_gather_feat(key_regr, key_inds_tl)
key_regr_br = _tranpose_and_gather_feat(key_regr, key_inds_br)
outs += [center_heat, key_heat, center_regr, key_regr_tl, key_regr_br]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
image = xs[0]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
center_cnv_, key_cnv_ = layer[2:4]
center_heat_, key_heat_ = layer[4:6]
center_regr_, key_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
if ind == self.nstack - 1:
#center_cnv = center_cnv_(cnv)
#key_cnv = key_cnv_(cnv)
center_heat, key_heat = center_heat_(cnv), key_heat_(cnv)
center_regr, key_regr = center_regr_(cnv), key_regr_(cnv)
outs += [center_heat, key_heat, center_regr, key_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-4:], **kwargs), 0, 0
def forward(self, *xs, **kwargs):
if len(xs) > 1:
return self._train(*xs, **kwargs)
return self._test(*xs, **kwargs)
class kp_line(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_center_layer=make_center_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual
):
super(kp_line, self).__init__()
print("use kp")
self.nstack = nstack
self._decode = _decode_pure_line
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.key_cnvs = nn.ModuleList([
make_center_layer(cnv_dim) for _ in range(nstack)
])
self.hybrid_cnvs = nn.ModuleList([
make_center_layer(cnv_dim) for _ in range(nstack)
])
## keypoint heatmaps
self.key_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
self.hybrid_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
## tags
self.key_tags = nn.ModuleList([
make_tag_layer(cnv_dim, curr_dim, 1) for _ in range(nstack)
])
for key_heat, hybrid_heat in zip(self.key_heats, self.hybrid_heats):
key_heat[-1].bias.data.fill_(-2.19)
hybrid_heat[-1].bias.data.fill_(-2.19)
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.key_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
def _train(self, *xs):
image = xs[0]
key_inds = xs[1]
key_inds_grouped = xs[2]
tag_group_lens = xs[3]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.key_cnvs, self.hybrid_cnvs,
self.key_heats, self.hybrid_heats,
self.key_tags,
self.key_regrs,
)
for ind, layer in enumerate(layers):
key_tag_grouped = []
kp_, cnv_ = layer[0:2]
key_cnv_, hybrid_cnv_ = layer[2:4]
key_heat_, hybrid_heat_ = layer[4:6]
key_tag_ = layer[6]
key_regr_= layer[7]
kp = kp_(inter)
cnv = cnv_(kp)
key_cnv = key_cnv_(cnv)
hybrid_cnv = hybrid_cnv_(cnv)
key_heat, hybrid_heat = key_heat_(key_cnv), hybrid_heat_(hybrid_cnv)
key_tag_ori = key_tag_(cnv)
key_regr_ori = key_regr_(key_cnv)
key_tag = _tranpose_and_gather_feat(key_tag_ori, key_inds)
key_regr = _tranpose_and_gather_feat(key_regr_ori, key_inds)
for g_id in range(16):
key_tag_grouped.append(torch.unsqueeze(_tranpose_and_gather_feat(key_tag_ori, key_inds_grouped[:, g_id,:]), 1))
key_tag_grouped = torch.cat(key_tag_grouped, 1)
outs += [key_heat, hybrid_heat, key_tag, key_tag_grouped, key_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
image = xs[0]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.key_cnvs, self.hybrid_cnvs,
self.key_heats, self.hybrid_heats,
self.key_tags,
self.key_regrs,
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
key_cnv_, hybrid_cnv_ = layer[2:4]
key_heat_, hybrid_heat_ = layer[4:6]
key_tag_ = layer[6]
key_regr_ = layer[7]
kp = kp_(inter)
cnv = cnv_(kp)
if ind == self.nstack - 1:
key_cnv = key_cnv_(cnv)
hybrid_cnv = hybrid_cnv_(cnv)
key_heat, hybrid_heat = key_heat_(key_cnv), hybrid_heat_(hybrid_cnv)
key_tag_ori = key_tag_(cnv)
key_regr_ori = key_regr_(key_cnv)
outs += [key_heat, hybrid_heat, key_tag_ori, key_regr_ori]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-4:], **kwargs), 0, 0
def forward(self, *xs, **kwargs):
if len(xs) > 1:
return self._train(*xs, **kwargs)
return self._test(*xs, **kwargs)
class kp_pure_dcn(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual, if_dcn=False
):
super(kp_pure_dcn, self).__init__()
print("use kp pure")
self.nstack = nstack
self._decode = _decode_pure
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.dcns = nn.ModuleList([
dcn(4, cnv_dim, cnv_dim, 3, 3) for _ in range(nstack)
])
self.tl_cnvs = nn.ModuleList([
make_tl_layer(cnv_dim) for _ in range(nstack)
])
self.br_cnvs = nn.ModuleList([
make_br_layer(cnv_dim) for _ in range(nstack)
])
## keypoint heatmaps
self.tl_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
self.br_heats = nn.ModuleList([
make_heat_layer(cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
for tl_heat, br_heat in zip(self.tl_heats, self.br_heats):
tl_heat[-1].bias.data.fill_(-2.19)
br_heat[-1].bias.data.fill_(-2.19)
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.tl_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.br_regrs = nn.ModuleList([
make_regr_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
def _train(self, *xs):
image = xs[0]
tl_inds = xs[1]
br_inds = xs[2]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs, self.dcns,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ , dcn_ = layer[0:3]
tl_cnv_, br_cnv_ = layer[3:5]
tl_heat_, br_heat_ = layer[5:7]
tl_regr_, br_regr_ = layer[7:9]
ts = time.time()
kp = kp_(inter)
cnv = cnv_(kp)
dcn = dcn_(cnv)
te = time.time()
tl_cnv = tl_cnv_(dcn)
br_cnv = br_cnv_(dcn)
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
tl_regr = _tranpose_and_gather_feat(tl_regr, tl_inds)
br_regr = _tranpose_and_gather_feat(br_regr, br_inds)
outs += [tl_heat, br_heat, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](dcn)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
tp = time.time()
image = xs[0]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs, self.dcns,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_, dcn_ = layer[0:3]
tl_cnv_, br_cnv_ = layer[3:5]
tl_heat_, br_heat_ = layer[5:7]
tl_regr_, br_regr_ = layer[7:9]
kp = kp_(inter)
cnv = cnv_(kp)
dcn = dcn_(cnv)
if ind == self.nstack - 1:
tl_cnv = tl_cnv_(dcn)
br_cnv = br_cnv_(dcn)
ts = time.time()
tl_heat, br_heat = tl_heat_(tl_cnv), br_heat_(br_cnv)
tl_regr, br_regr = tl_regr_(tl_cnv), br_regr_(br_cnv)
te = time.time()
outs += [tl_heat, br_heat, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](dcn)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-4:], **kwargs), ts-tp, te-ts
def forward(self, *xs, **kwargs):
if len(xs) > 1:
return self._train(*xs, **kwargs)
return self._test(*xs, **kwargs)
class kp_pure_mix(nn.Module):
def __init__(
self, n, nstack, dims, modules, out_dim, pre=None, cnv_dim=256,
make_tl_layer=make_tl_layer, make_br_layer=make_br_layer,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual
):
super(kp_pure_mix, self).__init__()
print("use kp mix")
self.nstack = nstack
self._decode = _decode_pure
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.tl_cnvs = nn.ModuleList([
make_tl_layer(cnv_dim) for _ in range(nstack)
])
self.br_cnvs = nn.ModuleList([
make_br_layer(cnv_dim) for _ in range(nstack)
])
## keypoint heatmaps
self.tl_heats = nn.ModuleList([
make_heat_layer(2*cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
self.br_heats = nn.ModuleList([
make_heat_layer(2*cnv_dim, curr_dim, out_dim) for _ in range(nstack)
])
for tl_heat, br_heat in zip(self.tl_heats, self.br_heats):
tl_heat[-1].bias.data.fill_(-2.19)
br_heat[-1].bias.data.fill_(-2.19)
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.tl_regrs = nn.ModuleList([
make_regr_layer(2*cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.br_regrs = nn.ModuleList([
make_regr_layer(2*cnv_dim, curr_dim, 2) for _ in range(nstack)
])
self.relu = nn.ReLU(inplace=True)
def _train(self, *xs):
image = xs[0]
tl_inds = xs[1]
br_inds = xs[2]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_regr_, br_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_cnv_mixed = torch.cat((tl_cnv, cnv), dim=1)
br_cnv_mixed = torch.cat((br_cnv, cnv), dim=1)
tl_heat, br_heat = tl_heat_(tl_cnv_mixed), br_heat_(br_cnv_mixed)
tl_regr, br_regr = tl_regr_(tl_cnv_mixed), br_regr_(br_cnv_mixed)
tl_regr = _tranpose_and_gather_feat(tl_regr, tl_inds)
br_regr = _tranpose_and_gather_feat(br_regr, br_inds)
outs += [tl_heat, br_heat, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def _test(self, *xs, **kwargs):
image = xs[0]
inter = self.pre(image)
outs = []
layers = zip(
self.kps, self.cnvs,
self.tl_cnvs, self.br_cnvs,
self.tl_heats, self.br_heats,
self.tl_regrs, self.br_regrs
)
for ind, layer in enumerate(layers):
kp_, cnv_ = layer[0:2]
tl_cnv_, br_cnv_ = layer[2:4]
tl_heat_, br_heat_ = layer[4:6]
tl_regr_, br_regr_ = layer[6:8]
kp = kp_(inter)
cnv = cnv_(kp)
if ind == self.nstack - 1:
tl_cnv = tl_cnv_(cnv)
br_cnv = br_cnv_(cnv)
tl_cnv_mixed = torch.cat((tl_cnv, cnv), dim=1)
br_cnv_mixed = torch.cat((br_cnv, cnv), dim=1)
tl_heat, br_heat = tl_heat_(tl_cnv_mixed), br_heat_(br_cnv_mixed)
tl_regr, br_regr = tl_regr_(tl_cnv_mixed), br_regr_(br_cnv_mixed)
outs += [tl_heat, br_heat, tl_regr, br_regr]
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return self._decode(*outs[-4:], **kwargs)
def forward(self, *xs, **kwargs):
if len(xs) > 1:
return self._train(*xs, **kwargs)
return self._test(*xs, **kwargs)
class AELoss(nn.Module):
def __init__(self, pull_weight=1, push_weight=1, regr_weight=1, focal_loss=_neg_loss, lamda=4, lamdb=2):
super(AELoss, self).__init__()
self.pull_weight = pull_weight
self.push_weight = push_weight
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.ae_loss = _ae_loss
self.regr_loss = _regr_loss
self.lamda = lamda
self.lamdb = lamdb
def forward(self, outs, targets):
stride = 6
tl_heats = outs[0::stride]
br_heats = outs[1::stride]
tl_tags = outs[2::stride]
br_tags = outs[3::stride]
tl_regrs = outs[4::stride]
br_regrs = outs[5::stride]
gt_tl_heat = targets[0]
gt_br_heat = targets[1]
gt_mask = targets[2]
gt_tl_regr = targets[3]
gt_br_regr = targets[4]
# focal loss
focal_loss = 0
tl_heats = [_sigmoid(t) for t in tl_heats]
br_heats = [_sigmoid(b) for b in br_heats]
focal_loss += self.focal_loss(tl_heats, gt_tl_heat, self.lamda, self.lamdb)
focal_loss += self.focal_loss(br_heats, gt_br_heat, self.lamda, self.lamdb)
# tag loss
pull_loss = 0
push_loss = 0
for tl_tag, br_tag in zip(tl_tags, br_tags):
pull, push = self.ae_loss(tl_tag, br_tag, gt_mask)
pull_loss += pull
push_loss += push
pull_loss = self.pull_weight * pull_loss
push_loss = self.push_weight * push_loss
regr_loss = 0
for tl_regr, br_regr in zip(tl_regrs, br_regrs):
regr_loss += self.regr_loss(tl_regr, gt_tl_regr, gt_mask)
regr_loss += self.regr_loss(br_regr, gt_br_regr, gt_mask)
regr_loss = self.regr_weight * regr_loss
loss = (focal_loss + pull_loss + push_loss + regr_loss) / len(tl_heats)
return loss.unsqueeze(0)
class AELossPureCls(nn.Module):
def __init__(self, pull_weight=1, push_weight=1, regr_weight=1, focal_loss=_neg_loss, lamda=4, lamdb=2):
super(AELossPureCls, self).__init__()
self.pull_weight = pull_weight
self.push_weight = push_weight
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.ae_loss = _ae_loss
self.regr_loss = _regr_loss
self.lamda = lamda
self.lamdb = lamdb
self.cls_loss = nn.CrossEntropyLoss(size_average=True)
self.offset_loss = _offset_loss
def forward(self, outs, targets):
stride = 4
tl_heats = outs[0:-2:stride]
br_heats = outs[1:-2:stride]
tl_regrs = outs[2:-2:stride]
br_regrs = outs[3:-2:stride]
cls = outs[-2]
offset = outs[-1]
gt_tl_heat = targets[0]
gt_br_heat = targets[1]
gt_mask = targets[2]
gt_tl_regr = targets[3]
gt_br_regr = targets[4]
gt_cls = targets[5]
gt_offset = targets[6]
# focal loss
focal_loss = 0
tl_heats = [_sigmoid(t) for t in tl_heats]
br_heats = [_sigmoid(b) for b in br_heats]
focal_loss += self.focal_loss(tl_heats, gt_tl_heat, self.lamda, self.lamdb)
focal_loss += self.focal_loss(br_heats, gt_br_heat, self.lamda, self.lamdb)
regr_loss = 0
for tl_regr, br_regr in zip(tl_regrs, br_regrs):
regr_loss += self.regr_loss(tl_regr, gt_tl_regr, gt_mask)
regr_loss += self.regr_loss(br_regr, gt_br_regr, gt_mask)
regr_loss = self.regr_weight * regr_loss
cls_loss = self.cls_loss(cls, gt_cls)
cls_loss = self.regr_weight * cls_loss
offset_loss = self.offset_loss(offset, gt_offset)
offset_loss = self.regr_weight * offset_loss
loss = (focal_loss + regr_loss) / len(tl_heats) + cls_loss + offset_loss
return loss.unsqueeze(0)
class AELossLineCls(nn.Module):
def __init__(self, pull_weight=1, push_weight=1, regr_weight=1, focal_loss=_neg_loss, lamda=4, lamdb=2):
super(AELossLineCls, self).__init__()
self.pull_weight = pull_weight
self.push_weight = push_weight
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.ae_loss = _ae_loss
self.regr_loss = _regr_loss
self.lamda = lamda
self.lamdb = lamdb
self.cls_loss = nn.CrossEntropyLoss(size_average=True)
self.offset_loss = _offset_loss
def forward(self, outs, targets):
stride = 2
ps_predictions = outs[0::stride]
ng_predictions = outs[1::stride]
ps_ind = targets[0].view(-1)
ng_ind = targets[1].view(-1)
ps_mask = targets[2].view(-1)
ng_mask = targets[2].view(-1)
# focal loss
cls_loss = 0
for ps_pre, ng_pre in zip(ps_predictions, ng_predictions):
ps_pre = ps_pre.view(-1, 2)
ng_pre = ng_pre.view(-1, 2)
if ps_mask.sum() > 0:
cls_loss += (self.cls_loss(ps_pre[ps_mask], ps_ind[ps_mask])/2)
if ng_mask.sum() > 0:
cls_loss += (self.cls_loss(ng_pre[ng_mask], ng_ind[ng_mask])/2)
loss = cls_loss
return loss.unsqueeze(0)
class AELossLineClsFocal(nn.Module):
def __init__(self, pull_weight=1, push_weight=1, regr_weight=1, focal_loss=_neg_loss, lamda=4, lamdb=2):
super(AELossLineClsFocal, self).__init__()
self.pull_weight = pull_weight
self.push_weight = push_weight
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.ae_loss = _ae_loss
self.regr_loss = _regr_loss
self.lamda = lamda
self.lamdb = lamdb
self.cls_loss = nn.CrossEntropyLoss(size_average=False, reduce=False)
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, outs, targets):
stride = 2
ps_predictions = outs[0::stride]
ng_predictions = outs[1::stride]
ps_ind = targets[0].view(-1)
ng_ind = targets[1].view(-1)
ps_mask = targets[2].view(-1)
ng_mask = targets[2].view(-1)
# focal loss
cls_loss = 0
for ps_pre, ng_pre in zip(ps_predictions, ng_predictions):
ps_pre = ps_pre.view(-1, 2)
ng_pre = ng_pre.view(-1, 2)
ps_pre_n = self.softmax(ps_pre)
ng_pre_n = self.softmax(ng_pre)
if ps_mask.sum() > 0:
cls_loss += (torch.pow(1 - ps_pre_n[ps_mask][:, 0], self.lamdb) * self.cls_loss(ps_pre[ps_mask],
ps_ind[
ps_mask]) / 2).mean()
if ng_mask.sum() > 0:
cls_loss += (torch.pow(1 - ng_pre_n[ng_mask][:, 1], self.lamdb) * self.cls_loss(ng_pre[ng_mask],
ng_ind[
ng_mask]) / 2).mean()
loss = cls_loss
return loss.unsqueeze(0)
class AELossLine(nn.Module):
def __init__(self, pull_weight=1, push_weight=1, regr_weight=1, focal_loss=_neg_loss, lamda=4, lamdb=2):
super(AELossLine, self).__init__()
self.pull_weight = pull_weight
self.push_weight = push_weight
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.ae_loss = _ae_line_loss
self.regr_loss = _regr_loss
self.lamda = lamda
self.lamdb = lamdb
def forward(self, outs, targets):
stride = 5
key_heats = outs[0::stride]
hybrid_heats = outs[1::stride]
key_tags = outs[2::stride]
key_tags_grouped = outs[3::stride]
key_regrs = outs[4::stride]
gt_key_heat = targets[0]
gt_hybrid_heat = targets[1]
gt_mask = targets[2]
gt_mask_grouped = targets[3]
gt_key_regr = targets[4]
# focal loss
focal_loss = 0
key_heats = [_sigmoid(t) for t in key_heats]
hybrid_heats = [_sigmoid(b) for b in hybrid_heats]
focal_loss += self.focal_loss(key_heats, gt_key_heat, self.lamda, self.lamdb)
focal_loss += self.focal_loss(hybrid_heats, gt_hybrid_heat, self.lamda, self.lamdb)
# tag loss
pull_loss = 0
push_loss = 0
for key_tag_grouped in key_tags_grouped:
pull, push = self.ae_loss(key_tag_grouped, gt_mask_grouped)
pull_loss += pull
push_loss += push
pull_loss = self.pull_weight * pull_loss
push_loss = self.push_weight * push_loss
regr_loss = 0
for key_regr in key_regrs:
regr_loss += self.regr_loss(key_regr, gt_key_regr, gt_mask)
regr_loss = self.regr_weight * regr_loss
loss = (focal_loss + pull_loss + push_loss + regr_loss) / len(key_heats)
return loss.unsqueeze(0)
class AELossPurePie(nn.Module):
def __init__(self, lamda, lamdb, regr_weight=1, focal_loss=_neg_loss):
super(AELossPurePie, self).__init__()
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.ae_loss = _ae_loss
self.regr_loss = _regr_loss
self.lamda = lamda
self.lamdb = lamdb
def forward(self, outs, targets):
stride = 5
center_heats = outs[0::stride]
key_heats = outs[1::stride]
center_regrs = outs[2::stride]
key_regrs_tl = outs[3::stride]
key_regrs_br = outs[4::stride]
gt_center_heat = targets[0]
gt_key_heat = targets[1]
gt_mask = targets[2]
gt_center_regr = targets[3]
gt_key_regr_tl = targets[4]
gt_key_regr_br = targets[5]
# focal loss
focal_loss = 0
center_heats = [_sigmoid(t) for t in center_heats]
key_heats = [_sigmoid(b) for b in key_heats]
#print(center_heats[0].shape)
#print(gt_center_heat.shape)
focal_loss += self.focal_loss(center_heats, gt_center_heat, self.lamda, self.lamdb)
focal_loss += self.focal_loss(key_heats, gt_key_heat, self.lamda, self.lamdb)
regr_loss = 0
for center_regr, key_regr_tl, key_regr_br in zip(center_regrs, key_regrs_tl, key_regrs_br):
regr_loss += self.regr_loss(center_regr, gt_center_regr, gt_mask)
regr_loss += self.regr_loss(key_regr_tl, gt_key_regr_tl, gt_mask)/2
regr_loss += self.regr_loss(key_regr_br, gt_key_regr_br, gt_mask)/2
regr_loss = self.regr_weight * regr_loss
loss = (focal_loss + regr_loss) / len(center_heats)
return loss.unsqueeze(0)
class AELossPure(nn.Module):
def __init__(self, lamda, lamdb, regr_weight=1, focal_loss=_neg_loss):
super(AELossPure, self).__init__()
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.ae_loss = _ae_loss
self.regr_loss = _regr_loss
self.lamda = lamda
self.lamdb = lamdb
def forward(self, outs, targets):
stride = 4
tl_heats = outs[0::stride]
br_heats = outs[1::stride]
tl_regrs = outs[2::stride]
br_regrs = outs[3::stride]
gt_tl_heat = targets[0]
gt_br_heat = targets[1]
gt_mask = targets[2]
gt_tl_regr = targets[3]
gt_br_regr = targets[4]
# focal loss
focal_loss = 0
tl_heats = [_sigmoid(t) for t in tl_heats]
br_heats = [_sigmoid(b) for b in br_heats]
focal_loss += self.focal_loss(tl_heats, gt_tl_heat, self.lamda, self.lamdb)
focal_loss += self.focal_loss(br_heats, gt_br_heat, self.lamda, self.lamdb)
regr_loss = 0
for tl_regr, br_regr in zip(tl_regrs, br_regrs):
regr_loss += self.regr_loss(tl_regr, gt_tl_regr, gt_mask)
regr_loss += self.regr_loss(br_regr, gt_br_regr, gt_mask)
regr_loss = self.regr_weight * regr_loss
loss = (focal_loss + regr_loss) / len(tl_heats)
return loss.unsqueeze(0)
| 35.022698
| 137
| 0.562046
| 11,070
| 81,778
| 3.777145
| 0.016531
| 0.093631
| 0.028938
| 0.046301
| 0.931481
| 0.919905
| 0.909956
| 0.903284
| 0.896133
| 0.890369
| 0
| 0.017903
| 0.336105
| 81,778
| 2,335
| 138
| 35.022698
| 0.752247
| 0.005356
| 0
| 0.87487
| 0
| 0
| 0.001328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032325
| false
| 0
| 0.005735
| 0
| 0.076642
| 0.005735
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fa2001d65f788d695964acfd63a7eb058c8229b7
| 5,903
|
py
|
Python
|
pycqed/simulations/pauli_transfer_matrices.py
|
sergimasot/PycQED_py3
|
54ad1b14929ffe5cc87cf59423a970e4b9baa3e1
|
[
"MIT"
] | 7
|
2017-02-27T09:49:23.000Z
|
2022-03-07T16:09:50.000Z
|
pycqed/simulations/pauli_transfer_matrices.py
|
sergimasot/PycQED_py3
|
54ad1b14929ffe5cc87cf59423a970e4b9baa3e1
|
[
"MIT"
] | 109
|
2019-10-01T16:09:24.000Z
|
2022-01-23T19:48:20.000Z
|
pycqed/simulations/pauli_transfer_matrices.py
|
sergimasot/PycQED_py3
|
54ad1b14929ffe5cc87cf59423a970e4b9baa3e1
|
[
"MIT"
] | 3
|
2019-11-07T08:31:00.000Z
|
2021-04-20T08:10:55.000Z
|
import numpy as np
"""
This file contains pauli transfer matrices for all basic qubit operations.
"""
I = np.eye(4)
# Pauli group
X = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, -1]], dtype=int)
Y = np.array([[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, -1]], dtype=int)
Z = np.array([[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]], dtype=int)
# Exchange group
S = np.array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 0, 1, 0]], dtype=int)
S2 = np.dot(S, S)
# Hadamard group
H = np.array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, -1, 0],
[0, 1, 0, 0]], dtype=int)
CZ = np.array([
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]],
dtype=int)
# CZ = np.array([[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
# [ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
# [ 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]],
# dtype=int)
def X_theta(theta:float, unit='deg'):
"""
PTM of rotation of theta degrees along the X axis
"""
if unit=='deg':
theta = np.deg2rad(theta)
X = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, np.cos(theta), -np.sin(theta)],
[0, 0, np.sin(theta), np.cos(theta)]], dtype=float)
return X
def Y_theta(theta:float, unit='deg'):
"""
PTM of rotation of theta degrees along the X axis
"""
if unit=='deg':
theta = np.deg2rad(theta)
Y = np.array([[1, 0, 0, 0],
[0, np.cos(theta), 0, np.sin(theta)],
[0, 0, 1, 0],
[0, -np.sin(theta), 0, np.cos(theta)]], dtype=float)
return Y
def Z_theta(theta:float, unit='deg'):
"""
PTM of rotation of theta degrees along the X axis
"""
if unit=='deg':
theta = np.deg2rad(theta)
Z = np.array([[1, 0, 0, 0],
[0, np.cos(theta), -np.sin(theta), 0],
[0, np.sin(theta), np.cos(theta), 0],
[0, 0, 0, 1]], dtype=float)
return Z
##############################################################################
#
##############################################################################
def process_fidelity(ptm_0, ptm_1, d: int=None):
"""
Calculates the average process fidelity between two pauli transfer matrices
Args:
ptm_0 (array) : n*n array specifying the first pauli transfer matrix
ptm_1 (array) : n*n array specifying the second pauli transfer matrix
d (int) : dimension of the Hilbert space
returns:
F (float) : Process fidelity
"""
if d == None:
d = np.shape(ptm_0)[0]**0.5
return np.dot(ptm_0.T, ptm_1).trace()/(d**2)
def average_gate_fidelity(ptm_0, ptm_1, d: int=None):
"""
Calculates the average average gate fidelity between two pauli transfer
matrices
Args:
ptm_0 (array) : n*n array specifying the first pauli transfer matrix
ptm_1 (array) : n*n array specifying the second pauli transfer matrix
d (int) : dimension of the Hilbert space
returns:
F_gate (float): Average gate fidelity
"""
if d == None:
d = np.shape(ptm_0)[0]**0.5
F_pro = process_fidelity(ptm_0, ptm_1, d)
F_avg_gate = process_fid_to_avg_gate_fid(F_pro, d)
return F_avg_gate
def process_fid_to_avg_gate_fid(F_pro: float, d:int):
"""
Converts
"""
F_avg_gate = (d*F_pro+1)/(d+1)
return F_avg_gate
| 35.993902
| 83
| 0.367779
| 1,024
| 5,903
| 2.077148
| 0.079102
| 0.48519
| 0.648801
| 0.77292
| 0.841561
| 0.828867
| 0.804419
| 0.789845
| 0.757405
| 0.756935
| 0
| 0.190255
| 0.41589
| 5,903
| 163
| 84
| 36.214724
| 0.426624
| 0.370998
| 0
| 0.410256
| 0
| 0
| 0.005435
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.012821
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
fa3200e8510993774401d5713267fcdc413ec224
| 3,039
|
py
|
Python
|
files/catkin_ws/build/gazebo_ros_pkgs/gazebo_msgs/cmake/gazebo_msgs-genmsg-context.py
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
4ac2c9555f939ba3bc3e97314eb611bdd9df5f27
|
[
"MIT"
] | null | null | null |
files/catkin_ws/build/gazebo_ros_pkgs/gazebo_msgs/cmake/gazebo_msgs-genmsg-context.py
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
4ac2c9555f939ba3bc3e97314eb611bdd9df5f27
|
[
"MIT"
] | null | null | null |
files/catkin_ws/build/gazebo_ros_pkgs/gazebo_msgs/cmake/gazebo_msgs-genmsg-context.py
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
4ac2c9555f939ba3bc3e97314eb611bdd9df5f27
|
[
"MIT"
] | null | null | null |
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/msg/ContactsState.msg;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/msg/ContactState.msg;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/msg/LinkState.msg;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/msg/LinkStates.msg;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/msg/ModelState.msg;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/msg/ModelStates.msg;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/msg/ODEJointProperties.msg;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/msg/ODEPhysics.msg;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/msg/WorldState.msg"
services_str = "/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/ApplyBodyWrench.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/DeleteModel.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/DeleteLight.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/GetLinkState.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/GetPhysicsProperties.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/SetJointProperties.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/SetModelConfiguration.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/SpawnModel.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/ApplyJointEffort.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/GetJointProperties.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/GetModelProperties.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/GetWorldProperties.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/SetLinkProperties.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/SetModelState.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/BodyRequest.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/GetLinkProperties.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/GetModelState.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/JointRequest.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/SetLinkState.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/SetPhysicsProperties.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/SetJointTrajectory.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/GetLightProperties.srv;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/srv/SetLightProperties.srv"
pkg_name = "gazebo_msgs"
dependencies_str = "std_msgs;geometry_msgs;sensor_msgs;trajectory_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "gazebo_msgs;/root/catkin_ws/src/gazebo_ros_pkgs/gazebo_msgs/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;trajectory_msgs;/opt/ros/kinetic/share/trajectory_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 253.25
| 1,684
| 0.854886
| 498
| 3,039
| 4.891566
| 0.170683
| 0.143678
| 0.162562
| 0.203202
| 0.630952
| 0.594828
| 0.594828
| 0.594828
| 0.594828
| 0.594828
| 0
| 0.000333
| 0.011188
| 3,039
| 11
| 1,685
| 276.272727
| 0.810316
| 0.016124
| 0
| 0
| 1
| 0.333333
| 0.932062
| 0.921687
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fa3689de3573995b5c57a4fb897fca0e897ae3e0
| 171
|
py
|
Python
|
tests/handlers.py
|
cloudchacho/hedwig-python
|
1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b
|
[
"Apache-2.0"
] | null | null | null |
tests/handlers.py
|
cloudchacho/hedwig-python
|
1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b
|
[
"Apache-2.0"
] | 3
|
2021-06-25T20:52:50.000Z
|
2021-11-30T16:22:30.000Z
|
tests/handlers.py
|
cloudchacho/hedwig-python
|
1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b
|
[
"Apache-2.0"
] | null | null | null |
from hedwig.models import Message
def _trip_created_handler(message: Message):
pass
def trip_created_handler(message: Message):
_trip_created_handler(message)
| 17.1
| 44
| 0.795322
| 22
| 171
| 5.818182
| 0.454545
| 0.257813
| 0.421875
| 0.585938
| 0.546875
| 0.546875
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140351
| 171
| 9
| 45
| 19
| 0.870748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.2
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
ad13209a3f4f5802339a51be3bffff4923a067dd
| 31
|
py
|
Python
|
calculators/CHA2DS2_and_HasBledScore/calculator_cha2ds2.py
|
pjayathissa/algorithm-audit
|
c8d35685187a460ae8ffd13a7ad9c85fb5ac500b
|
[
"MIT"
] | 1
|
2021-01-26T02:29:31.000Z
|
2021-01-26T02:29:31.000Z
|
calculators/CHA2DS2_and_HasBledScore/calculator_cha2ds2.py
|
pjayathissa/algorithm-audit
|
c8d35685187a460ae8ffd13a7ad9c85fb5ac500b
|
[
"MIT"
] | 6
|
2020-11-22T21:59:24.000Z
|
2020-12-07T22:11:58.000Z
|
calculators/CHA2DS2_and_HasBledScore/calculator_cha2ds2.py
|
pjayathissa/algorithm-audit
|
c8d35685187a460ae8ffd13a7ad9c85fb5ac500b
|
[
"MIT"
] | 2
|
2020-11-19T22:52:46.000Z
|
2021-01-26T19:12:22.000Z
|
def calculate():
return 0
| 7.75
| 16
| 0.612903
| 4
| 31
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.290323
| 31
| 3
| 17
| 10.333333
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
ad16b0cdcb93d937bc34afc07405b7416765d976
| 23,419
|
py
|
Python
|
xga/models/sb.py
|
DavidT3/XGA
|
cde51c3f29f98b5f1e981fb6d327c04072b0ba38
|
[
"BSD-3-Clause"
] | 12
|
2020-05-16T09:45:45.000Z
|
2022-02-14T14:41:46.000Z
|
xga/models/sb.py
|
DavidT3/XGA
|
cde51c3f29f98b5f1e981fb6d327c04072b0ba38
|
[
"BSD-3-Clause"
] | 684
|
2020-05-28T08:52:09.000Z
|
2022-03-31T10:56:24.000Z
|
xga/models/sb.py
|
DavidT3/XGA
|
cde51c3f29f98b5f1e981fb6d327c04072b0ba38
|
[
"BSD-3-Clause"
] | 2
|
2022-02-04T10:55:55.000Z
|
2022-02-04T11:30:56.000Z
|
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by David J Turner (david.turner@sussex.ac.uk) 26/03/2021, 16:58. Copyright (c) David J Turner
from typing import Union, List
import numpy as np
from astropy.units import Quantity, Unit, UnitConversionError, kpc, deg
from scipy.special import gamma
from .base import BaseModel1D
from ..exceptions import XGAFitError
from ..utils import r500, r200, r2500
class BetaProfile1D(BaseModel1D):
"""
An XGA model implementation of the beta profile, essentially a projected isothermal king profile, it can be
used to describe a simple galaxy cluster radial surface brightness profile.
"""
def __init__(self, x_unit: Union[str, Unit] = 'kpc', y_unit: Union[str, Unit] = Unit('ct/(s*arcmin**2)'),
cust_start_pars: List[Quantity] = None):
"""
The init of a subclass of the XGA BaseModel1D class, describing the surface brightness beta profile model.
:param Unit/str x_unit: The unit of the x-axis of this model, kpc for instance. May be passed as a string
representation or an astropy unit object.
:param Unit/str y_unit: The unit of the output of this model, keV for instance. May be passed as a string
representation or an astropy unit object.
:param List[Quantity] cust_start_pars: The start values of the model parameters for any fitting function that
used start values. The units are checked against default start values.
"""
# If a string representation of a unit was passed then we make it an astropy unit
if isinstance(x_unit, str):
x_unit = Unit(x_unit)
if isinstance(y_unit, str):
y_unit = Unit(y_unit)
poss_y_units = [Unit('ct/(s*arcmin**2)'), Unit('ct/(s*kpc**2)'), Unit('ct/(s*pix**2)')]
y_convertible = [u.is_equivalent(y_unit) for u in poss_y_units]
if not any(y_convertible):
allowed = ", ".join([u.to_string() for u in poss_y_units])
raise UnitConversionError("{p} is not convertible to any of the allowed units; "
"{a}".format(p=y_unit.to_string(), a=allowed))
else:
yu_ind = y_convertible.index(True)
poss_x_units = [kpc, deg, r200, r500, r2500]
x_convertible = [u.is_equivalent(x_unit) for u in poss_x_units]
if not any(x_convertible):
allowed = ", ".join([u.to_string() for u in poss_x_units])
raise UnitConversionError("{p} is not convertible to any of the allowed units; "
"{a}".format(p=x_unit.to_string(), a=allowed))
else:
xu_ind = x_convertible.index(True)
r_core_starts = [Quantity(100, 'kpc'), Quantity(0.2, 'deg'), Quantity(0.05, r200), Quantity(0.1, r500),
Quantity(0.5, r2500)]
# TODO MAKE THE NEW START PARAMETERS MORE SENSIBLE
norm_starts = [Quantity(1, 'ct/(s*arcmin**2)'), Quantity(1, 'ct/(s*kpc**2)'), Quantity(1, 'ct/(s*pix**2)')]
start_pars = [Quantity(1, ''), r_core_starts[xu_ind], norm_starts[yu_ind]]
if cust_start_pars is not None:
# If the custom start parameters can run this gauntlet without tripping an error then we're all good
# This method also returns the custom start pars converted to exactly the same units as the default
start_pars = self.compare_units(cust_start_pars, start_pars)
# TODO ALSO MAKE THESE MORE SENSIBLE
r_core_priors = [{'prior': Quantity([0, 2000], 'kpc'), 'type': 'uniform'},
{'prior': Quantity([0, 1], 'deg'), 'type': 'uniform'},
{'prior': Quantity([0, 1], r200), 'type': 'uniform'},
{'prior': Quantity([0, 1], r500), 'type': 'uniform'},
{'prior': Quantity([0, 1], r2500), 'type': 'uniform'}]
norm_priors = [{'prior': Quantity([0, 3], 'ct/(s*arcmin**2)'), 'type': 'uniform'},
{'prior': Quantity([0, 100], 'ct/(s*kpc**2)'), 'type': 'uniform'},
{'prior': Quantity([0, 100], 'ct/(s*pix**2)'), 'type': 'uniform'}]
priors = [{'prior': Quantity([0, 3]), 'type': 'uniform'}, r_core_priors[xu_ind], norm_priors[yu_ind]]
nice_pars = [r"$\beta$", r"R$_{\rm{core}}$", "S$_{0}$"]
info_dict = {'author': 'placeholder', 'year': 'placeholder', 'reference': 'placeholder',
'general': 'Essentially a projected isothermal king profile, it can be\n'
'used to describe a simple galaxy cluster radial surface brightness profile.'}
super().__init__(x_unit, y_unit, start_pars, priors, 'beta', 'Beta Profile', nice_pars, 'Surface Brightness',
info_dict)
@staticmethod
def model(x: Quantity, beta: Quantity, r_core: Quantity, norm: Quantity) -> Quantity:
"""
The model function for the beta profile.
:param Quantity x: The radii to calculate y values for.
:param Quantity beta: The beta slope parameter of the model.
:param Quantity r_core: The core radius.
:param Quantity norm: The normalisation of the model.
:return: The y values corresponding to the input x values.
:rtype: Quantity
"""
return norm * ((1 + ((x / r_core)**2))**((-3 * beta) + 0.5))
def derivative(self, x: Quantity, dx: Quantity = Quantity(0, ''), use_par_dist: bool = False) -> Quantity:
"""
Calculates the gradient of the beta profile at a given point, overriding the numerical method implemented
in the BaseModel1D class, as this simple model has an easily derivable first derivative.
:param Quantity x: The point(s) at which the slope of the model should be measured.
:param Quantity dx: This makes no difference here, as this is an analytical derivative. It has
been left in so that the inputs for this method don't vary between models.
:param bool use_par_dist: Should the parameter distributions be used to calculate a derivative
distribution; this can only be used if a fit has been performed using the model instance.
Default is False, in which case the current parameters will be used to calculate a single value.
:return: The calculated slope of the model at the supplied x position(s).
:rtype: Quantity
"""
# Just makes sure that if there are multiple x values then the broadcasting will go to the correct shape of
# numpy array
x = x[..., None]
# Generates a distribution of derivatives using the parameter distributions
if not use_par_dist:
beta, r_core, norm = self._model_pars
else:
beta, r_core, norm = self.par_dists
return ((2*x)/np.power(r_core, 2))*((-3*beta) + 0.5)*norm*np.power((1+(np.power(x/r_core, 2))), ((-3*beta)-0.5))
def inverse_abel(self, x: Quantity, use_par_dist: bool = False, method='analytical') -> Quantity:
"""
This overrides the inverse abel method of the model superclass, as there is an analytical solution to the
inverse abel transform of the single beta model. The form of the inverse abel transform is that of the
king profile, but with an extra transformation applied to the normalising parameter. This method can either
return a single value calculated using the current model parameters, or a distribution of values using
the parameter distributions (assuming that this model has had a fit run on it).
:param Quantity x: The x location(s) at which to calculate the value of the inverse abel transform.
:param bool use_par_dist: Should the parameter distributions be used to calculate a inverse abel transform
distribution; this can only be used if a fit has been performed using the model instance.
Default is False, in which case the current parameters will be used to calculate a single value.
:param str method: The method that should be used to calculate the values of this inverse abel transform.
Default for this overriding method is 'analytical', in which case the analytical solution is used.
You may pass 'direct', 'basex', 'hansenlaw', 'onion_bordas', 'onion_peeling', 'two_point', or
'three_point' to calculate the transform numerically.
:return: The inverse abel transform result.
:rtype: Quantity
"""
def transform(x_val: Quantity, beta: Quantity, r_core: Quantity, norm: Quantity):
"""
The function that calculates the inverse abel transform of this beta profile.
:param Quantity x_val: The x location(s) at which to calculate the value of the inverse abel transform.
:param Quantity beta: The beta parameter of the beta profile.
:param Quantity r_core: The core radius parameter of the beta profile.
:param Quantity norm: The normalisation of the beta profile.
:return:
"""
# We calculate the new normalisation parameter
new_norm = norm / ((gamma((3 * beta) - 0.5) * np.sqrt(np.pi) * r_core) / gamma(3 * beta))
# Then return the value of the transformed beta profile
return new_norm * np.power((1 + (np.power(x_val / r_core, 2))), (-3 * beta))
# Checking x units to make sure that they are valid
if not x.unit.is_equivalent(self._x_unit):
raise UnitConversionError("The input x coordinates cannot be converted to units of "
"{}".format(self._x_unit.to_string()))
else:
x = x.to(self._x_unit)
if method == 'analytical':
# The way the calculation is called depends on whether the user wants to use the parameter distributions
# or just the current model parameter values to calculate the inverse abel transform.
if not use_par_dist:
transform_res = transform(x, *self.model_pars)
elif use_par_dist and len(self._par_dists[0]) != 0:
transform_res = transform(x[..., None], *self.par_dists)
elif use_par_dist and len(self._par_dists[0]) == 0:
raise XGAFitError("No fit has been performed with this model, so there are no parameter distributions"
" available.")
else:
transform_res = super().inverse_abel(x, use_par_dist, method)
return transform_res
class DoubleBetaProfile1D(BaseModel1D):
"""
An XGA model implementation of the double beta profile, a summation of two single beta models. Often thought
to deal better with peaky cluster cores that you might get from a cool-core cluster, this model can be used
to describe a galaxy cluster radial surface brightness profile.
"""
def __init__(self, x_unit: Union[str, Unit] = 'kpc', y_unit: Union[str, Unit] = Unit('ct/(s*arcmin**2)'),
cust_start_pars: List[Quantity] = None):
"""
The init of a subclass of the XGA BaseModel1D class, describing the surface brightness double-beta
profile model.
:param Unit/str x_unit: The unit of the x-axis of this model, kpc for instance. May be passed as a string
representation or an astropy unit object.
:param Unit/str y_unit: The unit of the output of this model, keV for instance. May be passed as a string
representation or an astropy unit object.
:param List[Quantity] cust_start_pars: The start values of the model parameters for any fitting function that
used start values. The units are checked against default start values.
"""
# If a string representation of a unit was passed then we make it an astropy unit
if isinstance(x_unit, str):
x_unit = Unit(x_unit)
if isinstance(y_unit, str):
y_unit = Unit(y_unit)
poss_y_units = [Unit('ct/(s*arcmin**2)'), Unit('ct/(s*kpc**2)'), Unit('ct/(s*pix**2)')]
y_convertible = [u.is_equivalent(y_unit) for u in poss_y_units]
if not any(y_convertible):
allowed = ", ".join([u.to_string() for u in poss_y_units])
raise UnitConversionError("{p} is not convertible to any of the allowed units; "
"{a}".format(p=y_unit.to_string(), a=allowed))
else:
yu_ind = y_convertible.index(True)
poss_x_units = [kpc, deg, r200, r500, r2500]
x_convertible = [u.is_equivalent(x_unit) for u in poss_x_units]
if not any(x_convertible):
allowed = ", ".join([u.to_string() for u in poss_x_units])
raise UnitConversionError("{p} is not convertible to any of the allowed units; "
"{a}".format(p=x_unit.to_string(), a=allowed))
else:
xu_ind = x_convertible.index(True)
# TODO MAKE THE NEW START PARAMETERS MORE SENSIBLE
r_core1_starts = [Quantity(100, 'kpc'), Quantity(0.2, 'deg'), Quantity(0.05, r200), Quantity(0.1, r500),
Quantity(0.5, r2500)]
norm_starts = [Quantity(1, 'ct/(s*arcmin**2)'), Quantity(1, 'ct/(s*kpc**2)'), Quantity(1, 'ct/(s*pix**2)')]
r_core2_starts = [Quantity(400, 'kpc'), Quantity(0.5, 'deg'), Quantity(0.2, r200), Quantity(0.4, r500),
Quantity(1, r2500)]
start_pars = [Quantity(1, ''), r_core1_starts[xu_ind], norm_starts[yu_ind], Quantity(0.5, ''),
r_core2_starts[xu_ind], norm_starts[yu_ind]*0.5]
if cust_start_pars is not None:
# If the custom start parameters can run this gauntlet without tripping an error then we're all good
# This method also returns the custom start pars converted to exactly the same units as the default
start_pars = self.compare_units(cust_start_pars, start_pars)
# TODO ALSO MAKE THESE MORE SENSIBLE
r_core_priors = [{'prior': Quantity([1, 2000], 'kpc'), 'type': 'uniform'},
{'prior': Quantity([0, 1], 'deg'), 'type': 'uniform'},
{'prior': Quantity([0, 1], r200), 'type': 'uniform'},
{'prior': Quantity([0, 1], r500), 'type': 'uniform'},
{'prior': Quantity([0, 1], r2500), 'type': 'uniform'}]
norm_priors = [{'prior': Quantity([0, 3], 'ct/(s*arcmin**2)'), 'type': 'uniform'},
{'prior': Quantity([0, 100], 'ct/(s*kpc**2)'), 'type': 'uniform'},
{'prior': Quantity([0, 100], 'ct/(s*pix**2)'), 'type': 'uniform'}]
priors = [{'prior': Quantity([0, 3]), 'type': 'uniform'}, r_core_priors[xu_ind], norm_priors[yu_ind],
{'prior': Quantity([0, 3]), 'type': 'uniform'}, r_core_priors[xu_ind], norm_priors[yu_ind]]
nice_pars = [r"$\beta_{1}$", r"R$_{\rm{core},1}$", r"S$_{01}$", r"$\beta_{2}$", r"R$_{\rm{core},2}$",
r"S$_{02}$"]
info_dict = {'author': 'placeholder', 'year': 'placeholder', 'reference': 'placeholder',
'general': 'The double beta profile, a summation of two single beta models. Often\n '
'thought to deal better with peaky cluster cores that you might get from a\n'
' cool-core cluster, this model can be used to describe a galaxy cluster\n'
' radial surface brightness profile.'}
super().__init__(x_unit, y_unit, start_pars, priors, 'double_beta', 'Double Beta Profile', nice_pars,
'Surface Brightness', info_dict)
@staticmethod
def model(x: Quantity, beta_one: Quantity, r_core_one: Quantity, norm_one: Quantity, beta_two: Quantity,
r_core_two: Quantity, norm_two: Quantity) -> Quantity:
"""
The model function for the double beta profile.
:param Quantity x: The radii to calculate y values for.
:param Quantity norm_one: The normalisation of the first beta profile.
:param Quantity beta_one: The beta slope parameter of the first component beta profile.
:param Quantity r_core_one: The core radius of the first component beta profile.
:param Quantity norm_two: The normalisation of the second beta profile.
:param Quantity beta_two: The beta slope parameter of the second component beta profile.
:param Quantity r_core_two: The core radius of the second component beta profile.
:return: The y values corresponding to the input x values.
:rtype: Quantity
"""
p1 = norm_one * ((1 + ((x / r_core_one) ** 2)) ** ((-3 * beta_one) + 0.5))
p2 = norm_two * ((1 + ((x / r_core_two) ** 2)) ** ((-3 * beta_two) + 0.5))
return p1 + p2
def derivative(self, x: Quantity, dx: Quantity = Quantity(0, ''), use_par_dist: bool = False) -> Quantity:
"""
Calculates the gradient of the double beta profile at a given point, overriding the numerical method
implemented in the BaseModel1D class, as this simple model has an easily derivable first derivative.
:param Quantity x: The point(s) at which the slope of the model should be measured.
:param Quantity dx: This makes no difference here, as this is an analytical derivative. It has
been left in so that the inputs for this method don't vary between models.
:param bool use_par_dist: Should the parameter distributions be used to calculate a derivative
distribution; this can only be used if a fit has been performed using the model instance.
Default is False, in which case the current parameters will be used to calculate a single value.
:return: The calculated slope of the model at the supplied x position(s).
:rtype: Quantity
"""
x = x[..., None]
if not use_par_dist:
beta_one, r_core_one, norm_one, beta_two, r_core_two, norm_two = self._model_pars
else:
beta_one, r_core_one, norm_one, beta_two, r_core_two, norm_two = self.par_dists
p1 = ((2*x)/np.power(r_core_one, 2))*((-3*beta_one) + 0.5)*norm_one*np.power((1+(np.power(x/r_core_one, 2))),
((-3*beta_one)-0.5))
p2 = ((2*x)/np.power(r_core_two, 2))*((-3*beta_two)+0.5)*norm_two*np.power((1+(np.power(x/r_core_two, 2))),
((-3*beta_two)-0.5))
return p1 + p2
def inverse_abel(self, x: Quantity, use_par_dist: bool = False, method='analytical') -> Quantity:
"""
This overrides the inverse abel method of the model superclass, as there is an analytical solution to the
inverse abel transform of the double beta model. The form of the inverse abel transform is that of two summed
king profiles, but with extra transformations applied to the normalising parameters. This method can either
return a single value calculated using the current model parameters, or a distribution of values using
the parameter distributions (assuming that this model has had a fit run on it).
:param Quantity x: The x location(s) at which to calculate the value of the inverse abel transform.
:param bool use_par_dist: Should the parameter distributions be used to calculate a inverse abel transform
distribution; this can only be used if a fit has been performed using the model instance.
Default is False, in which case the current parameters will be used to calculate a single value.
:param str method: The method that should be used to calculate the values of this inverse abel transform.
Default for this overriding method is 'analytical', in which case the analytical solution is used.
You may pass 'direct', 'basex', 'hansenlaw', 'onion_bordas', 'onion_peeling', 'two_point', or
'three_point' to calculate the transform numerically.
:return: The inverse abel transform result.
:rtype: Quantity
"""
def transform(x_val: Quantity, beta: Quantity, r_core: Quantity, norm: Quantity, beta_two: Quantity,
r_core_two: Quantity, norm_two: Quantity):
"""
The function that calculates the inverse abel transform of this beta profile.
:param Quantity x_val: The x location(s) at which to calculate the value of the inverse abel transform.
:param Quantity beta: The beta parameter of the first beta profile.
:param Quantity r_core: The core radius parameter of the first beta profile.
:param Quantity norm: The normalisation of the first beta profile.
:param Quantity beta_two: The beta parameter of the second beta profile.
:param Quantity r_core_two: The core radius parameter of the second beta profile.
:param Quantity norm_two: The normalisation of the second beta profile.
:return:
"""
# We calculate the new normalisation parameter
new_norm = norm / ((gamma((3 * beta) - 0.5) * np.sqrt(np.pi) * r_core) / gamma(3 * beta))
new_norm_two = norm_two / ((gamma((3 * beta_two) - 0.5) * np.sqrt(np.pi)
* r_core_two) / gamma(3 * beta_two))
# Then return the value of the transformed beta profile
return new_norm * np.power((1 + (np.power(x_val / r_core, 2))), (-3 * beta)) + \
new_norm_two * np.power((1 + (np.power(x_val / r_core_two, 2))), (-3 * beta_two))
# Checking x units to make sure that they are valid
if not x.unit.is_equivalent(self._x_unit):
raise UnitConversionError("The input x coordinates cannot be converted to units of "
"{}".format(self._x_unit.to_string()))
else:
x = x.to(self._x_unit)
if method == 'analytical':
# The way the calculation is called depends on whether the user wants to use the parameter distributions
# or just the current model parameter values to calculate the inverse abel transform.
if not use_par_dist:
transform_res = transform(x, *self.model_pars)
elif use_par_dist and len(self._par_dists[0]) != 0:
transform_res = transform(x[..., None], *self.par_dists)
elif use_par_dist and len(self._par_dists[0]) == 0:
raise XGAFitError("No fit has been performed with this model, so there are no parameter distributions"
" available.")
else:
transform_res = super().inverse_abel(x, use_par_dist, method)
return transform_res
# So that things like fitting functions can be written generally to support different models
SB_MODELS = {"beta": BetaProfile1D, "double_beta": DoubleBetaProfile1D}
SB_MODELS_PUB_NAMES = {n: m().publication_name for n, m in SB_MODELS.items()}
SB_MODELS_PAR_NAMES = {n: m().par_publication_names for n, m in SB_MODELS.items()}
| 61.955026
| 120
| 0.622785
| 3,269
| 23,419
| 4.342307
| 0.10829
| 0.017612
| 0.017753
| 0.027052
| 0.909334
| 0.897922
| 0.886368
| 0.860514
| 0.836562
| 0.808454
| 0
| 0.018804
| 0.277894
| 23,419
| 377
| 121
| 62.119363
| 0.82059
| 0.423588
| 0
| 0.659091
| 1
| 0
| 0.15277
| 0
| 0
| 0
| 0
| 0.005305
| 0
| 1
| 0.056818
| false
| 0
| 0.039773
| 0
| 0.153409
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ad3b274d8d247a70735d55a64d762861d0f86c58
| 338
|
py
|
Python
|
common/determined_common/schemas/__init__.py
|
ryantd/determined
|
b4f3be3c1878a9a7fdad4775647018753b39ef21
|
[
"Apache-2.0"
] | 1
|
2021-03-29T13:39:45.000Z
|
2021-03-29T13:39:45.000Z
|
common/determined_common/schemas/__init__.py
|
ZithaChitra/determined
|
1466d46dfd6abc56ad65d9904d4173ea62cff771
|
[
"Apache-2.0"
] | null | null | null |
common/determined_common/schemas/__init__.py
|
ZithaChitra/determined
|
1466d46dfd6abc56ad65d9904d4173ea62cff771
|
[
"Apache-2.0"
] | null | null | null |
# Avoid automatically importing any generated objects in this module, since those imports are
# non-trivial and would affect the user experience in the cli.
from determined_common.schemas._auto_init import auto_init
from determined_common.schemas._schema_base import SchemaBase
from determined_common.schemas._union_base import UnionBase
| 56.333333
| 93
| 0.857988
| 49
| 338
| 5.714286
| 0.693878
| 0.15
| 0.214286
| 0.289286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109467
| 338
| 5
| 94
| 67.6
| 0.930233
| 0.449704
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ad7189403c72a9f266beea03cbba1f3b656e3726
| 82
|
py
|
Python
|
test_pe0001.py
|
guandalf/projecteuler
|
2986f12ace33bac92dd2c39294343d3bbb605d32
|
[
"MIT"
] | null | null | null |
test_pe0001.py
|
guandalf/projecteuler
|
2986f12ace33bac92dd2c39294343d3bbb605d32
|
[
"MIT"
] | null | null | null |
test_pe0001.py
|
guandalf/projecteuler
|
2986f12ace33bac92dd2c39294343d3bbb605d32
|
[
"MIT"
] | null | null | null |
import pytest
from pe0001 import *
def test_pe0001():
assert pe0001(10) == 23
| 16.4
| 27
| 0.707317
| 12
| 82
| 4.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242424
| 0.195122
| 82
| 5
| 27
| 16.4
| 0.621212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a8e496346ab6cc6d554f816659fa97d579f7b018
| 20,471
|
py
|
Python
|
utils/neu/metrics/gan_eval/inceptionv3.py
|
shikisawamura/nnabla-examples
|
baf4e4cc620dedbf4368683325c0fb868676850d
|
[
"Apache-2.0"
] | null | null | null |
utils/neu/metrics/gan_eval/inceptionv3.py
|
shikisawamura/nnabla-examples
|
baf4e4cc620dedbf4368683325c0fb868676850d
|
[
"Apache-2.0"
] | null | null | null |
utils/neu/metrics/gan_eval/inceptionv3.py
|
shikisawamura/nnabla-examples
|
baf4e4cc620dedbf4368683325c0fb868676850d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import numpy as np
def construct_inceptionv3(x, use_up_to="pool"):
def stem_block(input_variable, outmaps, kernel=(3, 3), pad=(0, 0), stride=(1, 1), eps=1e-3):
with nn.parameter_scope(f"Convolution"):
h = PF.convolution(input_variable, outmaps=outmaps,
kernel=kernel, pad=pad, stride=stride, with_bias=False)
with nn.parameter_scope(f"BatchNormalization"):
h = PF.batch_normalization(h, batch_stat=False, eps=eps)
h = F.relu(h)
return h
def module_A(input_variable, is_first=False, eps=1e-3):
with nn.parameter_scope(f"Conv"):
with nn.parameter_scope("Convolution"):
h0 = PF.convolution(input_variable, outmaps=64, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h0 = PF.batch_normalization(h0, batch_stat=False, eps=eps)
h0 = F.relu(h0)
#################################################################
with nn.parameter_scope(f"Conv_2"):
with nn.parameter_scope("Convolution"):
h1 = PF.convolution(input_variable, outmaps=48, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h1 = PF.batch_normalization(h1, batch_stat=False, eps=eps)
h1 = F.relu(h1)
with nn.parameter_scope(f"Conv_3"):
with nn.parameter_scope("Convolution"):
h1 = PF.convolution(h1, outmaps=64, kernel=(
5, 5), pad=(2, 2), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h1 = PF.batch_normalization(h1, batch_stat=False, eps=eps)
h1 = F.relu(h1)
#################################################################
with nn.parameter_scope(f"Conv_4"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(input_variable, outmaps=64, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_5"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=96, kernel=(
3, 3), pad=(1, 1), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_6"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=96, kernel=(
3, 3), pad=(1, 1), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
#################################################################
with nn.parameter_scope(f"Conv_7"):
h3 = F.average_pooling(input_variable, kernel=(
3, 3), pad=(1, 1), stride=(1, 1), including_pad=False)
with nn.parameter_scope("Convolution"):
if is_first:
outmaps = 32
else:
outmaps = 64
h3 = PF.convolution(h3, outmaps=outmaps, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h3 = PF.batch_normalization(h3, batch_stat=False, eps=eps)
h3 = F.relu(h3)
h = F.concatenate(*[h0, h1, h2, h3], axis=1)
return h
def grid_size_reduction_A(input_variable, eps=1e-3):
with nn.parameter_scope(f"Conv"):
with nn.parameter_scope("Convolution"):
h1 = PF.convolution(input_variable, outmaps=384, kernel=(
3, 3), pad=(0, 0), stride=(2, 2), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h1 = PF.batch_normalization(h1, batch_stat=False, eps=eps)
h1 = F.relu(h1)
#################################################################
with nn.parameter_scope(f"Conv_4"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(input_variable, outmaps=64, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_5"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=96, kernel=(
3, 3), pad=(1, 1), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_6"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=96, kernel=(
3, 3), pad=(0, 0), stride=(2, 2), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
#################################################################
h3 = F.max_pooling(input_variable, kernel=(3, 3),
pad=(0, 0), stride=(2, 2))
h = F.concatenate(*[h1, h2, h3], axis=1)
return h
def module_B(input_variable, internal_outmaps=128, eps=1e-3):
with nn.parameter_scope(f"Conv"):
with nn.parameter_scope("Convolution"):
h0 = PF.convolution(input_variable, outmaps=192, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h0 = PF.batch_normalization(h0, batch_stat=False, eps=eps)
h0 = F.relu(h0)
#################################################################
with nn.parameter_scope(f"Conv_2"):
with nn.parameter_scope("Convolution"):
h1 = PF.convolution(input_variable, outmaps=internal_outmaps, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h1 = PF.batch_normalization(h1, batch_stat=False, eps=eps)
h1 = F.relu(h1)
with nn.parameter_scope(f"Conv_8"):
with nn.parameter_scope("Convolution"):
h1 = PF.convolution(h1, outmaps=internal_outmaps, kernel=(
1, 7), pad=(0, 3), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h1 = PF.batch_normalization(h1, batch_stat=False, eps=eps)
h1 = F.relu(h1)
with nn.parameter_scope(f"Conv_3"):
with nn.parameter_scope("Convolution"):
h1 = PF.convolution(h1, outmaps=192, kernel=(
7, 1), pad=(3, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h1 = PF.batch_normalization(h1, batch_stat=False, eps=eps)
h1 = F.relu(h1)
#################################################################
with nn.parameter_scope(f"Conv_4"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(input_variable, outmaps=internal_outmaps, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_9"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=internal_outmaps, kernel=(
7, 1), pad=(3, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_10"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=internal_outmaps, kernel=(
1, 7), pad=(0, 3), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_5"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=internal_outmaps, kernel=(
7, 1), pad=(3, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_6"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=192, kernel=(
1, 7), pad=(0, 3), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
#################################################################
with nn.parameter_scope(f"Conv_7"):
h3 = F.average_pooling(input_variable, kernel=(
3, 3), pad=(1, 1), stride=(1, 1), including_pad=False)
with nn.parameter_scope("Convolution"):
h3 = PF.convolution(h3, outmaps=192, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h3 = PF.batch_normalization(h3, batch_stat=False, eps=eps)
h3 = F.relu(h3)
h = F.concatenate(*[h0, h1, h2, h3], axis=1)
return h
def grid_size_reduction_B(input_variable, eps=1e-3):
with nn.parameter_scope(f"Conv_2"):
with nn.parameter_scope("Convolution"):
h1 = PF.convolution(input_variable, outmaps=192, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h1 = PF.batch_normalization(h1, batch_stat=False, eps=eps)
h1 = F.relu(h1)
with nn.parameter_scope(f"Conv"):
with nn.parameter_scope("Convolution"):
h1 = PF.convolution(h1, outmaps=320, kernel=(
3, 3), pad=(0, 0), stride=(2, 2), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h1 = PF.batch_normalization(h1, batch_stat=False, eps=eps)
h1 = F.relu(h1)
###########################################################
with nn.parameter_scope(f"Conv_4"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(input_variable, outmaps=192, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_5"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=192, kernel=(
1, 7), pad=(0, 3), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_3"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=192, kernel=(
7, 1), pad=(3, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_6"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=192, kernel=(
3, 3), pad=(0, 0), stride=(2, 2), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
#################################################################
h3 = F.max_pooling(input_variable, kernel=(3, 3),
pad=(0, 0), stride=(2, 2))
h = F.concatenate(*[h1, h2, h3], axis=1)
return h
def module_C(input_variable, use_max_pool=False, eps=1e-3):
with nn.parameter_scope(f"Conv"):
with nn.parameter_scope("Convolution"):
h0 = PF.convolution(input_variable, outmaps=320, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h0 = PF.batch_normalization(h0, batch_stat=False, eps=eps)
h0 = F.relu(h0)
#################################################################
with nn.parameter_scope(f"Conv_2"):
with nn.parameter_scope("Convolution"):
h1 = PF.convolution(input_variable, outmaps=384, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h1 = PF.batch_normalization(h1, batch_stat=False, eps=eps)
h1 = F.relu(h1)
with nn.parameter_scope(f"Conv_3"):
with nn.parameter_scope("Convolution"):
h11 = PF.convolution(h1, outmaps=384, kernel=(
1, 3), pad=(0, 1), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h11 = PF.batch_normalization(h11, batch_stat=False, eps=eps)
h11 = F.relu(h11)
with nn.parameter_scope(f"Conv_8"):
with nn.parameter_scope("Convolution"):
h12 = PF.convolution(h1, outmaps=384, kernel=(
3, 1), pad=(1, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h12 = PF.batch_normalization(h12, batch_stat=False, eps=eps)
h12 = F.relu(h12)
#################################################################
with nn.parameter_scope(f"Conv_4"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(input_variable, outmaps=448, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_5"):
with nn.parameter_scope("Convolution"):
h2 = PF.convolution(h2, outmaps=384, kernel=(
3, 3), pad=(1, 1), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h2 = PF.batch_normalization(h2, batch_stat=False, eps=eps)
h2 = F.relu(h2)
with nn.parameter_scope(f"Conv_6"):
with nn.parameter_scope("Convolution"):
h21 = PF.convolution(h2, outmaps=384, kernel=(
1, 3), pad=(0, 1), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h21 = PF.batch_normalization(h21, batch_stat=False, eps=eps)
h21 = F.relu(h21)
with nn.parameter_scope(f"Conv_9"):
with nn.parameter_scope("Convolution"):
h22 = PF.convolution(h2, outmaps=384, kernel=(
3, 1), pad=(1, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h22 = PF.batch_normalization(h22, batch_stat=False, eps=eps)
h22 = F.relu(h22)
#################################################################
with nn.parameter_scope(f"Conv_7"):
if use_max_pool:
h3 = F.max_pooling(input_variable, kernel=(
3, 3), stride=(1, 1), pad=(1, 1))
else:
h3 = F.average_pooling(input_variable, kernel=(
3, 3), pad=(1, 1), stride=(1, 1), including_pad=False)
with nn.parameter_scope("Convolution"):
h3 = PF.convolution(h3, outmaps=192, kernel=(
1, 1), pad=(0, 0), stride=(1, 1), with_bias=False)
with nn.parameter_scope("BatchNormalization"):
h3 = PF.batch_normalization(h3, batch_stat=False, eps=eps)
h3 = F.relu(h3)
h = F.concatenate(*[h0, h11, h12, h21, h22, h3], axis=1)
return h
with nn.parameter_scope("Conv"):
conv1 = stem_block(x, outmaps=32, kernel=(3, 3), stride=(2, 2))
with nn.parameter_scope("Conv_2"):
conv2 = stem_block(conv1, outmaps=32, kernel=(3, 3), stride=(1, 1))
with nn.parameter_scope("Conv_3"):
conv3 = stem_block(conv2, outmaps=64, kernel=(3, 3),
pad=(1, 1), stride=(1, 1))
pool1 = F.max_pooling(conv3, kernel=(3, 3), stride=(2, 2))
with nn.parameter_scope("Conv_4"):
conv4 = stem_block(pool1, outmaps=80, kernel=(1, 1), stride=(1, 1))
with nn.parameter_scope("Conv_5"):
conv5 = stem_block(conv4, outmaps=192, kernel=(3, 3), stride=(1, 1))
pool2 = F.max_pooling(conv5, kernel=(3, 3), stride=(2, 2))
with nn.parameter_scope("Inception"):
mixed = module_A(pool2, is_first=True)
with nn.parameter_scope("Inception_2"):
mixed_1 = module_A(mixed)
with nn.parameter_scope("Inception_3"):
mixed_2 = module_A(mixed_1)
with nn.parameter_scope("Inception_4"):
mixed_3 = grid_size_reduction_A(mixed_2)
with nn.parameter_scope("Inception_5"):
mixed_4 = module_B(mixed_3)
with nn.parameter_scope("Inception_6"):
mixed_5 = module_B(mixed_4, internal_outmaps=160)
with nn.parameter_scope("Inception_7"):
mixed_6 = module_B(mixed_5, internal_outmaps=160)
with nn.parameter_scope("Inception_8"):
mixed_7 = module_B(mixed_6, internal_outmaps=192)
with nn.parameter_scope("Inception_9"):
mixed_8 = grid_size_reduction_B(mixed_7)
with nn.parameter_scope("Inception_10"):
mixed_9 = module_C(mixed_8)
with nn.parameter_scope("Inception_11"):
mixed_10 = module_C(mixed_9, use_max_pool=True)
if use_up_to == "prepool":
return mixed_10
pooled = F.average_pooling(mixed_10, mixed_10.shape[2:])
if use_up_to == "pool":
pooled = F.reshape(pooled, pooled.shape[:2])
return pooled
with nn.parameter_scope("Affine"):
# note that this contains bias NOT USED for Inception Score.
classifier = PF.affine(pooled, 1008)
return classifier
def main():
x = nn.Variable((1, 3, 299, 299))
x.d = np.random.random(x.shape)
pooled = construct_inceptionv3(x)
print(pooled.shape)
if __name__ == '__main__':
main()
| 43.096842
| 96
| 0.547018
| 2,525
| 20,471
| 4.272871
| 0.070099
| 0.070627
| 0.176569
| 0.235425
| 0.828251
| 0.790064
| 0.777273
| 0.768375
| 0.751228
| 0.74613
| 0
| 0.053093
| 0.286943
| 20,471
| 474
| 97
| 43.187764
| 0.686031
| 0.031166
| 0
| 0.702006
| 0
| 0
| 0.076822
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022923
| false
| 0
| 0.011461
| 0
| 0.060172
| 0.002865
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d167a53d7abfb02570d5be2029004dfd1b9672da
| 215
|
py
|
Python
|
invite.py
|
p1ngu1n0/htb-scripts
|
2d9c5212a8fcba6a807925253ade403ac0ef0bab
|
[
"MIT"
] | null | null | null |
invite.py
|
p1ngu1n0/htb-scripts
|
2d9c5212a8fcba6a807925253ade403ac0ef0bab
|
[
"MIT"
] | null | null | null |
invite.py
|
p1ngu1n0/htb-scripts
|
2d9c5212a8fcba6a807925253ade403ac0ef0bab
|
[
"MIT"
] | null | null | null |
import requests; import base64; print("Codigo de invitacion es: ", base64.b64decode(requests.post("https://www.hackthebox.eu/api/invite/generate", headers={'User-Agent': 'Custom'}).json()["data"]["code"]).decode())
| 215
| 215
| 0.72093
| 28
| 215
| 5.535714
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029557
| 0.055814
| 215
| 1
| 215
| 215
| 0.73399
| 0
| 0
| 0
| 1
| 0
| 0.435185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
0f689b341f290a9b1fef5710ea349a74dcad6306
| 45
|
py
|
Python
|
Lekcija14/script01.py
|
islamspahic/python-uup
|
ea7c9c655ad8e678bca5ee52138836732266799f
|
[
"Apache-2.0"
] | null | null | null |
Lekcija14/script01.py
|
islamspahic/python-uup
|
ea7c9c655ad8e678bca5ee52138836732266799f
|
[
"Apache-2.0"
] | null | null | null |
Lekcija14/script01.py
|
islamspahic/python-uup
|
ea7c9c655ad8e678bca5ee52138836732266799f
|
[
"Apache-2.0"
] | null | null | null |
n = (1, 2, 3, 99, 10)
print(n)
print(n[2])
| 7.5
| 21
| 0.466667
| 11
| 45
| 1.909091
| 0.636364
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 0.244444
| 45
| 5
| 22
| 9
| 0.382353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
0f7f4408a133d9b27536611c2581945e9f1f9254
| 210
|
py
|
Python
|
pkgsettings/__init__.py
|
kpn-digital/py-pkgsettings
|
fe9e1f6739b1ff873cbb8b534f48a18c624495fd
|
[
"Apache-2.0"
] | 5
|
2016-05-12T15:34:24.000Z
|
2021-10-16T07:47:09.000Z
|
pkgsettings/__init__.py
|
kpn-digital/py-pkgsettings
|
fe9e1f6739b1ff873cbb8b534f48a18c624495fd
|
[
"Apache-2.0"
] | 12
|
2016-03-14T11:23:09.000Z
|
2018-08-02T16:09:19.000Z
|
pkgsettings/__init__.py
|
kpn-digital/py-pkgsettings
|
fe9e1f6739b1ff873cbb8b534f48a18c624495fd
|
[
"Apache-2.0"
] | 7
|
2016-05-11T10:23:45.000Z
|
2019-07-03T12:58:09.000Z
|
# -*- coding: utf-8 -*-
from .pkgsettings import DuplicateConfigureWarning, PrefixedSettings, Settings, SimpleSettings
__all__ = ["DuplicateConfigureWarning", "PrefixedSettings", "Settings", "SimpleSettings"]
| 42
| 94
| 0.780952
| 15
| 210
| 10.666667
| 0.733333
| 0.5125
| 0.6125
| 0.7875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005236
| 0.090476
| 210
| 4
| 95
| 52.5
| 0.832461
| 0.1
| 0
| 0
| 0
| 0
| 0.336898
| 0.13369
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
7e3c715664c985d1db405d0d09a47271f4827e1d
| 11
|
py
|
Python
|
python/testData/psi/MissingListSeparators.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/psi/MissingListSeparators.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/psi/MissingListSeparators.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
a = [1 2 3]
| 11
| 11
| 0.363636
| 4
| 11
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 0.363636
| 11
| 1
| 11
| 11
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7e58db1f6b93777b911bdc6741def6a42149b102
| 161
|
py
|
Python
|
tests/parser/grounding.12.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/grounding.12.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/grounding.12.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
g(a).
g(c).
h(a) :- i.
i :- g(Lit), not h(Lit).
"""
output = """
g(a).
g(c).
h(a) :- i.
i :- g(Lit), not h(Lit).
"""
| 6.44
| 25
| 0.304348
| 28
| 161
| 1.75
| 0.321429
| 0.081633
| 0.122449
| 0.163265
| 0.77551
| 0.77551
| 0.77551
| 0.77551
| 0.77551
| 0.77551
| 0
| 0
| 0.372671
| 161
| 24
| 26
| 6.708333
| 0.485149
| 0
| 0
| 0.833333
| 0
| 0
| 0.780142
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
7e787e7b2b4f98cf0b3eccbee86a523ba522075c
| 151
|
py
|
Python
|
8.ddpg_for_grid/common/__init__.py
|
quantumiracle/DQN_traffic_light_-control
|
464c17ba25ebcb49f78d6cdcc96d7fe3764d7508
|
[
"Apache-2.0"
] | 52
|
2019-03-10T01:56:32.000Z
|
2022-03-02T05:00:09.000Z
|
common/__init__.py
|
chi6/Model-based-meta-learning-rl
|
fda134dcbd87ef3e91f339ea2f836f28ec5f7784
|
[
"MIT"
] | 2
|
2019-09-10T07:30:54.000Z
|
2022-02-20T12:39:20.000Z
|
common/__init__.py
|
chi6/Model-based-meta-learning-rl
|
fda134dcbd87ef3e91f339ea2f836f28ec5f7784
|
[
"MIT"
] | 20
|
2019-04-26T01:30:45.000Z
|
2022-03-08T05:42:22.000Z
|
# flake8: noqa F403
from common.console_util import *
from common.dataset import Dataset
from common.math_util import *
from common.misc_util import *
| 25.166667
| 34
| 0.807947
| 23
| 151
| 5.173913
| 0.478261
| 0.336134
| 0.235294
| 0.336134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030534
| 0.13245
| 151
| 5
| 35
| 30.2
| 0.877863
| 0.112583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
7d34992b156d6bcfa840f016690f7a725bde2aff
| 201
|
py
|
Python
|
xmas12_1.py
|
abdulfaizp/adventofcode
|
11dd475312d69aadfa341a5d1e39b521cb6afe7c
|
[
"CC0-1.0"
] | null | null | null |
xmas12_1.py
|
abdulfaizp/adventofcode
|
11dd475312d69aadfa341a5d1e39b521cb6afe7c
|
[
"CC0-1.0"
] | null | null | null |
xmas12_1.py
|
abdulfaizp/adventofcode
|
11dd475312d69aadfa341a5d1e39b521cb6afe7c
|
[
"CC0-1.0"
] | null | null | null |
import json
file=open("input12.txt")
data=file.read()
print json.dumps(data, sort_keys=True, indent=5, separators=(',', ':'))
# print json.dumps(data, sort_keys=True, indent=5, separators=(',', ':'))
| 28.714286
| 73
| 0.676617
| 29
| 201
| 4.62069
| 0.551724
| 0.134328
| 0.208955
| 0.268657
| 0.701493
| 0.701493
| 0.701493
| 0.701493
| 0.701493
| 0.701493
| 0
| 0.021858
| 0.089552
| 201
| 7
| 73
| 28.714286
| 0.710383
| 0.353234
| 0
| 0
| 0
| 0
| 0.100775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.25
| null | null | 0.25
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
adcb9cd359fef04e0bc7a1e93da203b748d97cec
| 711
|
py
|
Python
|
syncer/src/addresses.py
|
bravo-kernel/ergowatch
|
858c62369a7afdd393c722abe8d22788a456ca50
|
[
"MIT"
] | 7
|
2021-12-07T19:19:15.000Z
|
2022-02-19T20:40:33.000Z
|
syncer/src/addresses.py
|
bravo-kernel/ergowatch
|
858c62369a7afdd393c722abe8d22788a456ca50
|
[
"MIT"
] | 19
|
2021-08-18T02:45:56.000Z
|
2022-03-30T02:00:49.000Z
|
syncer/src/addresses.py
|
bravo-kernel/ergowatch
|
858c62369a7afdd393c722abe8d22788a456ca50
|
[
"MIT"
] | 2
|
2022-01-05T20:07:10.000Z
|
2022-02-19T20:40:55.000Z
|
coinbase = '2Z4YBkDsDvQj8BX7xiySFewjitqp2ge9c99jfes2whbtKitZTxdBYqbrVZUvZvKv6aqn9by4kp3LE1c26LCyosFnVnm6b6U1JYvWpYmL2ZnixJbXLjWAWuBThV1D6dLpqZJYQHYDznJCk49g5TUiS4q8khpag2aNmHwREV7JSsypHdHLgJT7MGaw51aJfNubyzSKxZ4AJXFS27EfXwyCLzW1K6GVqwkJtCoPvrcLqmqwacAWJPkmh78nke9H4oT88XmSbRt2n9aWZjosiZCafZ4osUDxmZcc5QVEeTWn8drSraY3eFKe8Mu9MSCcVU'
tx_fees = '2iHkR7CWvD1R4j1yZg5bkeDRQavjAaVPeTDFGGLZduHyfWMuYpmhHocX8GJoaieTx78FntzJbCBVL6rf96ocJoZdmWBL2fci7NqWgAirppPQmZ7fN9V6z13Ay6brPriBKYqLp1bT2Fk4FkFLCfdPpe'
treasury = '4L1ktFSzm3SH1UioDuUf5hyaraHird4D2dEACwQ1qHGjSKtA6KaNvSzRCZXZGf9jkfNAEC1SrYaZmCuvb2BKiXk5zW9xuvrXFT7FdNe2KqbymiZvo5UQLAm5jQY8ZBRhTZ4AFtZa1UF5nd4aofwPiL7YkJuyiL5hDHMZL1ZnyL746tHmRYMjAhCgE7d698dRhkdSeVy'
| 177.75
| 332
| 0.970464
| 7
| 711
| 98.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153076
| 0.016878
| 711
| 3
| 333
| 237
| 0.832618
| 0
| 0
| 0
| 0
| 0
| 0.94209
| 0.94209
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
70b567b86bd190a7cbaa100bcc8a19a724d84768
| 8,564
|
py
|
Python
|
backend/test_common.py
|
unicorn1337x/stopthevirus
|
7a67d8a6a6d0cbc5f58b45b605aeef0c5c407304
|
[
"MIT"
] | 9
|
2020-03-30T00:20:28.000Z
|
2020-11-29T07:24:02.000Z
|
backend/test_common.py
|
unicorn1337x/stopthevirus
|
7a67d8a6a6d0cbc5f58b45b605aeef0c5c407304
|
[
"MIT"
] | 109
|
2020-03-28T20:51:48.000Z
|
2020-12-21T11:01:15.000Z
|
backend/test_common.py
|
unicorn1337x/stopthevirus
|
7a67d8a6a6d0cbc5f58b45b605aeef0c5c407304
|
[
"MIT"
] | 4
|
2020-04-01T03:05:56.000Z
|
2020-11-29T07:24:14.000Z
|
import unittest
from game_engine.common import GameSchedule, STV_I18N_TABLE
import datetime
from datetime import datetime, date, time, timedelta
import pytz
class CommonTest(unittest.TestCase):
def test_us_today_localized_string(self):
schedule = STV_I18N_TABLE['US']
self.assertRegex(
schedule.today_localized_string,
"[0-9]+/[0-9]+"
)
def test_us_tomorrow_localized_string(self):
schedule = STV_I18N_TABLE['US']
self.assertRegex(
schedule.tomorrow_localized_string,
"[0-9]+/[0-9]+"
)
def test_us_localized_time_string(self):
schedule = STV_I18N_TABLE['US']
self.assertRegex(
schedule.localized_time_string(
time=schedule.game_start_time
),
"12pm (EST|EDT)"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_challenge_start_time
),
"12pm (EST|EDT)"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_challenge_end_time
),
"6pm (EST|EDT)"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_tribal_council_start_time
),
"7pm (EST|EDT)"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_tribal_council_end_time
),
"9pm (EST|EDT)"
)
def test_us_localized_time_delta_sec(self):
schedule = STV_I18N_TABLE['US']
self.assertAlmostEqual(
schedule.localized_time_delta_sec(
end_time=(datetime.now() + timedelta(seconds=5.0)).time()
),
5.0,
places=3
)
def test_uk_today_localized_string(self):
schedule = STV_I18N_TABLE['UK']
self.assertRegex(
schedule.today_localized_string,
"[0-9]+/[0-9]+"
)
def test_uk_tomorrow_localized_string(self):
schedule = STV_I18N_TABLE['UK']
self.assertRegex(
schedule.tomorrow_localized_string,
"[0-9]+/[0-9]+"
)
def test_uk_localized_time_string(self):
schedule = STV_I18N_TABLE['UK']
self.assertRegex(
schedule.localized_time_string(
time=schedule.game_start_time
),
"12pm BST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_challenge_start_time
),
"12pm BST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_challenge_end_time
),
"6pm BST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_tribal_council_start_time
),
"7pm BST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_tribal_council_end_time
),
"9pm BST"
)
def test_uk_localized_time_delta_sec(self):
schedule = STV_I18N_TABLE['UK']
self.assertAlmostEqual(
schedule.localized_time_delta_sec(
end_time=(datetime.now() + timedelta(seconds=5.0)).time()
),
5.0,
places=3
)
def test_jp_today_localized_string(self):
schedule = STV_I18N_TABLE['JP']
self.assertRegex(
schedule.today_localized_string,
"[0-9]+/[0-9]+"
)
def test_jp_tomorrow_localized_string(self):
schedule = STV_I18N_TABLE['JP']
self.assertRegex(
schedule.tomorrow_localized_string,
"[0-9]+/[0-9]+"
)
def test_jp_localized_time_string(self):
schedule = STV_I18N_TABLE['JP']
self.assertRegex(
schedule.localized_time_string(
time=schedule.game_start_time
),
"12pm JST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_challenge_start_time
),
"12pm JST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_challenge_end_time
),
"6pm JST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_tribal_council_start_time
),
"7pm JST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_tribal_council_end_time
),
"9pm JST"
)
def test_jp_localized_time_delta_sec(self):
schedule = STV_I18N_TABLE['JP']
self.assertAlmostEqual(
schedule.localized_time_delta_sec(
end_time=(datetime.now() + timedelta(seconds=5.0)).time()
),
5.0,
places=3
)
def test_it_today_localized_string(self):
schedule = STV_I18N_TABLE['IT']
self.assertRegex(
schedule.today_localized_string,
"[0-9]+/[0-9]+"
)
def test_it_tomorrow_localized_string(self):
schedule = STV_I18N_TABLE['IT']
self.assertRegex(
schedule.tomorrow_localized_string,
"[0-9]+/[0-9]+"
)
def test_it_localized_time_string(self):
schedule = STV_I18N_TABLE['IT']
self.assertRegex(
schedule.localized_time_string(
time=schedule.game_start_time
),
"12pm CEST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_challenge_start_time
),
"12pm CEST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_challenge_end_time
),
"6pm CEST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_tribal_council_start_time
),
"7pm CEST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_tribal_council_end_time
),
"9pm CEST"
)
def test_it_localized_time_delta_sec(self):
schedule = STV_I18N_TABLE['IT']
self.assertAlmostEqual(
schedule.localized_time_delta_sec(
end_time=(datetime.now() + timedelta(seconds=5.0)).time()
),
5.0,
places=3
)
def test_de_today_localized_string(self):
schedule = STV_I18N_TABLE['DE']
self.assertRegex(
schedule.today_localized_string,
"[0-9]+/[0-9]+"
)
def test_de_tomorrow_localized_string(self):
schedule = STV_I18N_TABLE['DE']
self.assertRegex(
schedule.tomorrow_localized_string,
"[0-9]+/[0-9]+"
)
def test_de_localized_time_string(self):
schedule = STV_I18N_TABLE['DE']
self.assertRegex(
schedule.localized_time_string(
time=schedule.game_start_time
),
"12pm CEST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_challenge_start_time
),
"12pm CEST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_challenge_end_time
),
"6pm CEST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_tribal_council_start_time
),
"7pm CEST"
)
self.assertRegex(
schedule.localized_time_string(
time=schedule.daily_tribal_council_end_time
),
"9pm CEST"
)
def test_de_localized_time_delta_sec(self):
schedule = STV_I18N_TABLE['DE']
self.assertAlmostEqual(
schedule.localized_time_delta_sec(
end_time=(datetime.now() + timedelta(seconds=5.0)).time()
),
5.0,
places=3
)
if __name__ == '__main__':
unittest.main()
| 28.835017
| 73
| 0.544722
| 836
| 8,564
| 5.214115
| 0.07177
| 0.119293
| 0.184675
| 0.183528
| 0.951136
| 0.944024
| 0.944024
| 0.94173
| 0.911906
| 0.855701
| 0
| 0.026238
| 0.368052
| 8,564
| 296
| 74
| 28.932432
| 0.779194
| 0
| 0
| 0.717949
| 0
| 0
| 0.047057
| 0
| 0
| 0
| 0
| 0
| 0.14652
| 1
| 0.07326
| false
| 0
| 0.018315
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
70f1165837ecf67adf73489aad5ff61e3fcdb18a
| 3,988
|
py
|
Python
|
src_py/hat/drivers/iec60870/iec103/__init__.py
|
PtahSeker/hat-drivers
|
d694b71538ccaf23fb145f09282f78be5c4c18c6
|
[
"Apache-2.0"
] | null | null | null |
src_py/hat/drivers/iec60870/iec103/__init__.py
|
PtahSeker/hat-drivers
|
d694b71538ccaf23fb145f09282f78be5c4c18c6
|
[
"Apache-2.0"
] | null | null | null |
src_py/hat/drivers/iec60870/iec103/__init__.py
|
PtahSeker/hat-drivers
|
d694b71538ccaf23fb145f09282f78be5c4c18c6
|
[
"Apache-2.0"
] | null | null | null |
"""IEC 60870-5-103 communication protocol"""
from hat.drivers.iec60870.iec103.common import (Bytes,
Description,
IoAddress,
Identification,
TimeSize,
Time,
ValueType,
NoneValue,
TextValue,
BitstringValue,
UIntValue,
IntValue,
UFixedValue,
FixedValue,
Real32Value,
Real64Value,
DoubleValue,
SingleValue,
ExtendedDoubleValue,
MeasurandValue,
TimeValue,
IdentificationValue,
RelativeTimeValue,
IoAddressValue,
DoubleWithTimeValue,
DoubleWithRelativeTimeValue,
MeasurandWithRelativeTimeValue,
TextNumberValue,
ReplyValue,
ArrayValue,
IndexValue,
Value,
AsduAddress,
DataCause,
GenericDataCause,
MeasurandType,
MeasurandValues,
Data,
GenericData,
time_from_datetime,
time_to_datetime)
from hat.drivers.iec60870.iec103.master import (DataCb,
GenericDataCb,
MasterConnection)
__all__ = ['Bytes',
'Description',
'IoAddress',
'Identification',
'TimeSize',
'Time',
'ValueType',
'NoneValue',
'TextValue',
'BitstringValue',
'UIntValue',
'IntValue',
'UFixedValue',
'FixedValue',
'Real32Value',
'Real64Value',
'DoubleValue',
'SingleValue',
'ExtendedDoubleValue',
'MeasurandValue',
'TimeValue',
'IdentificationValue',
'RelativeTimeValue',
'IoAddressValue',
'DoubleWithTimeValue',
'DoubleWithRelativeTimeValue',
'MeasurandWithRelativeTimeValue',
'TextNumberValue',
'ReplyValue',
'ArrayValue',
'IndexValue',
'Value',
'AsduAddress',
'DataCause',
'GenericDataCause',
'MeasurandType',
'MeasurandValues',
'Data',
'GenericData',
'time_from_datetime',
'time_to_datetime',
'DataCb',
'GenericDataCb',
'MasterConnection']
| 42.88172
| 79
| 0.300903
| 117
| 3,988
| 10.153846
| 0.504274
| 0.011785
| 0.023569
| 0.037037
| 0.890572
| 0.843434
| 0.843434
| 0.843434
| 0.843434
| 0.843434
| 0
| 0.023639
| 0.64995
| 3,988
| 92
| 80
| 43.347826
| 0.827364
| 0.009529
| 0
| 0
| 0
| 0
| 0.136917
| 0.014452
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022727
| 0
| 0.022727
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
cb0295afdfafe4f65d1f56889953660d88329e9d
| 52,159
|
py
|
Python
|
gsf/maketmp_filt.py
|
mtakahiro/gsf
|
c09c5d32a45b0277c469d2d3cb2f8c11f1fc0278
|
[
"MIT"
] | 9
|
2019-08-23T19:00:54.000Z
|
2022-02-23T17:57:41.000Z
|
gsf/maketmp_filt.py
|
mtakahiro/gsf
|
c09c5d32a45b0277c469d2d3cb2f8c11f1fc0278
|
[
"MIT"
] | 17
|
2020-05-22T17:41:15.000Z
|
2022-03-20T03:32:48.000Z
|
gsf/maketmp_filt.py
|
mtakahiro/gsf
|
c09c5d32a45b0277c469d2d3cb2f8c11f1fc0278
|
[
"MIT"
] | 1
|
2020-02-01T22:55:37.000Z
|
2020-02-01T22:55:37.000Z
|
# The purpose of this code is to figure out Z and redshift (with 1-sig range).
import matplotlib.pyplot as plt
import numpy as np
import scipy
import sys
import os
from scipy.integrate import simps
import asdf
from astropy.io import fits,ascii
from astropy.modeling.models import Moffat1D
from astropy.convolution import convolve, convolve_fft
# Custom modules
from .function import *
from .function_igm import *
col = ['b', 'skyblue', 'g', 'orange', 'r']
def get_spectrum_draine(lambda_d, DL, zbest, numin, numax, ndmodel, \
DIR_DUST='./DL07spec/', phi=0.055):
'''
Parameters
----------
lambda_d : array
Wavelength array, in AA.
phi : float
Eq.34 of Draine & Li 2007. (default: 0.055)
Returns
-------
Interpolated dust emission in Fnu of m0=25.0. In units of Fnu/Msun
Notes
-----
umins = ['0.10', '0.15', '0.20', '0.30', '0.40', '0.50', '0.70', '0.80', '1.00', '1.20',\
'1.50', '2.00', '2.50', '3.00', '4.00', '5.00', '7.00', '8.00', '10.0', '12.0', '15.0',\
'20.0', '25.0']
umaxs = ['1e3', '1e4', '1e5', '1e6', '1e7']
'''
from .function import fnutonu
import scipy.interpolate as interpolate
Htokg = 1.66054e-27 # kg/H
kgtomsun = 1.989e+30 # kg/Msun
MsunperH = Htokg / kgtomsun # Msun/H
Jytoerg = 1e-23 # erg/s/cm2/Hz / Jy
c = 3e18
Mpc_cm = 3.08568025e+24
umins = ['0.10', '0.15', '0.20', '0.30', '0.40', '0.50', '0.70', '0.80', '1.00', '1.20',\
'1.50', '2.00', '2.50', '3.00', '4.00', '5.00', '7.00', '8.00', '12.0', '15.0',\
'20.0', '25.0']
umaxs = ['1e3', '1e4', '1e5', '1e6', '1e7']
dust_model = DIR_DUST+'draine07_models.txt'
fd_model = ascii.read(dust_model)
umin = umins[numin]
umax = umaxs[numax]
dmodel = fd_model['name'][ndmodel]
# See README of Draine's table.
#dU = float(umin)/100.
#U = np.arange(float(umin), float(umax), dU)
#Umean = np.mean(U)
#print(Umean)
gamma = 0.01
Umean = (1-gamma) * float(umin) + (gamma * float(umin) * np.log(float(umax)/float(umin))) / (1-float(umin)/float(umax))
#print(Umean)
#try:
if True:
#if dmodel == 'MW3.1_60':
if ndmodel == 6 or ndmodel == 1:
data_start = 55
else:
data_start = 36
file_dust = DIR_DUST + 'U%s/U%s_%s_%s.txt'%(umin, umin, umax, dmodel)
print(file_dust)
fd = ascii.read(file_dust, data_start=data_start)
wave = fd['col1'] # in mu m.
flux = fd['col2'] # erg/s H-1
flux_dens = fd['col3'] # j_nu: Jy cm2 sr-1 H-1
fobs = flux_dens * Jytoerg / (4.*np.pi*DL**2/(1.+zbest)) / MsunperH
# Jy cm2 sr-1 H-1 * erg/s/cm2/Hz / Jy / (cm2 * sr) / (Msun/H) = erg/s/cm2/Hz / Msun
freq = c / (wave*1e4) # 1/Hz
ftot = np.sum(flux/ MsunperH) # erg/s H-1 / (Msun/H) = erg/s/Msun
#Mh = ftot * phi # erg/s/Msun * g/(erg/s) = g/Msun
# Get Mdust to MH2 ratio;
#ftot2 = np.sum(flux * freq)
#MdtoMh = phi / Umean * ftot2 / (Htokg*1e3) # g/(erg/s)/H / 1 * erg/s/Msun / g * Msun/H = 1/Msun
#print(MdtoMh)
MdtoMh = 0.01 #1.0
Mdust = 1.0 * MdtoMh #* Mh * kgtomsun * mh # Msun/template
# Then;
fnu = fnutonu(fobs) / Mdust # Flux density per 1Msun for dust.
fint = interpolate.interp1d(wave*1e4, fnu, kind='nearest', fill_value="extrapolate")
yy_s = fint(lambda_d)
con_yys = (lambda_d<1e4) # Interpolation cause some error??
yy_s[con_yys] = 0
#except:
# print('Something is wrong.',file_dust)
# yy_s = lambda_d * 0
return yy_s
def sim_spec(lmin, fin, sn):
'''
SIMULATION of SPECTRA.
Parameters
----------
wave_obs :
wave_temp :
flux_temp :
sn_obs
Returns
-------
frand, erand
'''
frand = fin * 0
erand = fin * 0
for ii in range(len(lmin)):
if fin[ii]>0 and sn[ii]>0:
erand[ii] = fin[ii]/sn[ii]
frand[ii] = np.random.normal(fin[ii],erand[ii],1)
else:
erand[ii] = 1e10
frand[ii] = np.random.normal(fin[ii],erand[ii],1)
return frand, erand
def check_library(MB, af):
'''
Purpose
-------
Check library if it has a consistency setup as input file.
Returns
-------
True is no problem.
'''
# Z needs special care in z0 script, to avoid Zfix.
if False:
Zmax_tmp, Zmin_tmp = float(MB.inputs['ZMAX']), float(MB.inputs['ZMIN'])
delZ_tmp = float(MB.inputs['DELZ'])
if Zmax_tmp == Zmin_tmp or delZ_tmp==0:
delZ_tmp = 0.0001
Zall = np.arange(Zmin_tmp, Zmax_tmp+delZ_tmp, delZ_tmp) # in logZsun
else:
Zall = MB.Zall
flag = True
# Matallicity:
for aa in range(len(Zall)):
if Zall[aa] != af['Z%d'%(aa)]:
print('Z:', Zall[aa], af['Z%d'%(aa)])
flag = False
if MB.SFH_FORM==-99:
# Age:
for aa in range(len(MB.age)):
if MB.age[aa] != af['age%d'%(aa)]:
print('age:', MB.age[aa], af['age%d'%(aa)])
flag = False
# Tau (e.g. ssp/csp):
for aa in range(len(MB.tau0)):
if MB.tau0[aa] != af['tau0%d'%(aa)]:
print('tau0:', MB.tau0[aa], af['tau0%d'%(aa)])
flag = False
else:
# Age:
for aa in range(len(MB.ageparam)):
if MB.ageparam[aa] != af['age%d'%(aa)]:
print('age:', MB.ageparam[aa], af['age%d'%(aa)])
flag = False
for aa in range(len(MB.tau)):
if MB.tau[aa] != af['tau%d'%(aa)]:
print('tau:', MB.tau[aa], af['tau%d'%(aa)])
flag = False
# IMF:
if MB.nimf != af['nimf']:
print('nimf:', MB.nimf, af['nimf'])
flag = False
return flag
def get_LSF(inputs, DIR_EXTR, ID, lm, c=3e18):
'''
Gets Morphology params, and returns LSF
'''
Amp = 0
f_morp = False
try:
if inputs['MORP'] == 'moffat' or inputs['MORP'] == 'gauss':
f_morp = True
try:
mor_file = inputs['MORP_FILE'].replace('$ID','%s'%(ID))
fm = ascii.read(DIR_EXTR + mor_file)
Amp = fm['A']
gamma = fm['gamma']
if inputs['MORP'] == 'moffat':
alp = fm['alp']
else:
alp = 0
except Exception:
print('Error in reading morphology params.')
print('No morphology convolution.')
pass
else:
print('MORP Keywords does not match.')
print('No morphology convolution.')
except:
pass
############################
# Template convolution;
############################
try:
sig_temp = float(inputs['SIG_TEMP'])
except:
sig_temp = 50.
print('Template resolution is unknown.')
print('Set to %.1f km/s.'%(sig_temp))
dellam = lm[1] - lm[0] # AA/pix
R_temp = c/(sig_temp*1e3*1e10)
sig_temp_pix = np.median(lm) / R_temp / dellam # delta v in pixel;
#
sig_inst = 0 #65 #km/s for Manga
# If grism;
if f_morp:
print('Templates convolution (intrinsic morphology).')
if gamma>sig_temp_pix:
sig_conv = np.sqrt(gamma**2-sig_temp_pix**2)
else:
sig_conv = 0
print('Template resolution is broader than Morphology.')
print('No convolution is applied to templates.')
xMof = np.arange(-5, 5.1, .1) # dimension must be even.
if inputs['MORP'] == 'moffat' and Amp>0 and alp>0:
LSF = moffat(xMof, Amp, 0, np.sqrt(gamma**2-sig_temp_pix**2), alp)
print('Template convolution with Moffat.')
elif inputs['MORP'] == 'gauss':
sigma = gamma
LSF = gauss(xMof, Amp, np.sqrt(sigma**2-sig_temp_pix**2))
print('Template convolution with Gaussian.')
print('params is sigma;',sigma)
else:
print('Something is wrong with the convolution file. Exiting.')
return False
else: # For slit spectroscopy. To be updated...
print('Templates convolution (intrinsic velocity).')
try:
vdisp = float(inputs['VDISP'])
dellam = lm[1] - lm[0] # AA/pix
#R_disp = c/(vdisp*1e3*1e10)
R_disp = c/(np.sqrt(vdisp**2-sig_inst**2)*1e3*1e10)
vdisp_pix = np.median(lm) / R_disp / dellam # delta v in pixel;
print('Templates are convolved at %.2f km/s.'%(vdisp))
if vdisp_pix-sig_temp_pix>0:
sig_conv = np.sqrt(vdisp_pix**2-sig_temp_pix**2)
else:
sig_conv = 0
except:
vdisp = 0.
print('Templates are not convolved.')
sig_conv = 0 #np.sqrt(sig_temp_pix**2)
pass
xMof = np.arange(-5, 5.1, .1) # dimension must be even.
Amp = 1.
LSF = gauss(xMof, Amp, sig_conv)
return LSF, lm
def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, tau_lim=0.001, tmp_norm=1e10):
'''
Make SPECTRA at given z and filter set.
Parameters
----------
inputs : str
Configuration file.
zbest : float
Best redshift at this iteration. Templates are generated based on this reshift.
Z : array
Stellar phase metallicity in logZsun.
age : array
Age, in Gyr.
fneb : int
flag for adding nebular emissionself.
tmp_norm : float
Normalization of the stored templated. i.e. each template is in units of tmp_norm [Lsun].
'''
inputs = MB.inputs
ID = MB.ID
age = MB.age
nage = MB.nage
Z = MB.Zall
fneb = MB.fneb
DIR_TMP = MB.DIR_TMP
zbest = MB.zgal
tau0 = MB.tau0
try:
af = MB.af0
except:
af = asdf.open(DIR_TMP + 'spec_all.asdf')
MB.af0 = af
mshdu = af['ML']
spechdu = af['spec']
# Consistency check:
flag = check_library(MB, af)
if not flag:
print('\n!!!\nThere is inconsistency in z0 library and input file. Exiting.\n!!!\n')
sys.exit()
# ASDF Big tree;
# Create header;
tree = {
'isochrone': af['isochrone'],
'library': af['library'],
'nimf': af['nimf'],
'version_gsf': af['version_gsf']
}
tree_spec = {}
tree_spec_full = {}
tree_ML = {}
tree_SFR = {}
try:
DIR_EXTR = MB.DIR_EXTR #inputs['DIR_EXTR']
if len(DIR_EXTR)==0:
DIR_EXTR = False
except:
DIR_EXTR = False
DIR_FILT = MB.DIR_FILT #inputs['DIR_FILT']
try:
CAT_BB_IND = inputs['CAT_BB_IND']
except:
CAT_BB_IND = False
try:
CAT_BB = inputs['CAT_BB']
except:
CAT_BB = False
try:
SFILT = MB.filts #inputs['FILTER'] # filter band string.
FWFILT = fil_fwhm(SFILT, DIR_FILT)
except:
print('########################')
print('Filter is not detected!!')
print('Make sure your \nfilter directory is correct.')
print('########################')
sys.exit()
try:
SKIPFILT = inputs['SKIPFILT']
SKIPFILT = [x.strip() for x in SKIPFILT.split(',')]
except:
SKIPFILT = []
# If FIR data;
try:
DFILT = inputs['FIR_FILTER'] # filter band string.
DFILT = [x.strip() for x in DFILT.split(',')]
DFWFILT = fil_fwhm(DFILT, DIR_FILT)
CAT_BB_DUST = inputs['CAT_BB_DUST']
DT0 = float(inputs['TDUST_LOW'])
DT1 = float(inputs['TDUST_HIG'])
dDT = float(inputs['TDUST_DEL'])
f_dust = True
print('FIR is implemented.\n')
except:
print('No FIR is implemented.\n')
f_dust = False
pass
print('############################')
print('Making templates at z=%.4f'%(zbest))
print('############################')
####################################################
# Get extracted spectra.
####################################################
#
# Get ascii data.
#
f_spec = False
try:
spec_files = inputs['SPEC_FILE'] #.replace('$ID','%s'%(ID))
spec_files = [x.strip() for x in spec_files.split(',')]
ninp0 = np.zeros(len(spec_files), dtype='int')
for ff, spec_file in enumerate(spec_files):
try:
fd0 = np.loadtxt(DIR_EXTR + spec_file, comments='#')
lm0tmp = fd0[:,0]
fobs0 = fd0[:,1]
eobs0 = fd0[:,2]
ninp0[ff] = len(lm0tmp)#[con_tmp])
except Exception:
print('File, %s/%s, cannot be open.'%(DIR_EXTR,spec_file))
pass
# Constructing arrays.
lm = np.zeros(np.sum(ninp0[:]),dtype='float')
fobs = np.zeros(np.sum(ninp0[:]),dtype='float')
eobs = np.zeros(np.sum(ninp0[:]),dtype='float')
fgrs = np.zeros(np.sum(ninp0[:]),dtype='int') # FLAG for G102/G141.
for ff, spec_file in enumerate(spec_files):
try:
fd0 = np.loadtxt(DIR_EXTR + spec_file, comments='#')
lm0tmp= fd0[:,0]
fobs0 = fd0[:,1]
eobs0 = fd0[:,2]
for ii1 in range(ninp0[ff]):
if ff==0:
ii = ii1
else:
ii = ii1 + np.sum(ninp0[:ff])
fgrs[ii] = ff
lm[ii] = lm0tmp[ii1]
fobs[ii] = fobs0[ii1]
eobs[ii] = eobs0[ii1]
f_spec = True
except Exception:
pass
except:
print('No spec file is provided.')
pass
#############################
# READ BB photometry from CAT_BB:
#############################
if CAT_BB:
fd0 = ascii.read(CAT_BB)
id0 = fd0['id'].astype('str')
ii0 = np.where(id0[:]==ID)
try:
if len(ii0[0]) == 0:
print('Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(ID))
return False
id = fd0['id'][ii0]
except:
print('Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(ID))
return False
fbb = np.zeros(len(SFILT), dtype='float')
ebb = np.zeros(len(SFILT), dtype='float')
for ii in range(len(SFILT)):
try:
fbb[ii] = fd0['F%s'%(SFILT[ii])][ii0]
ebb[ii] = fd0['E%s'%(SFILT[ii])][ii0]
except:
print('Could not find flux inputs for filter %s in the input BB catalog! Exiting.'%(SFILT[ii]))
return False
elif CAT_BB_IND: # if individual photometric catalog; made in get_sdss.py
fd0 = fits.open(DIR_EXTR + CAT_BB_IND)
hd0 = fd0[1].header
bunit_bb = float(hd0['bunit'][:5])
lmbb0= fd0[1].data['wavelength']
fbb0 = fd0[1].data['flux'] * bunit_bb
ebb0 = 1/np.sqrt(fd0[1].data['inverse_variance']) * bunit_bb
unit = 'nu'
try:
unit = inputs['UNIT_SPEC']
except:
print('No param for UNIT_SPEC is found.')
print('BB flux unit is assumed to Fnu.')
pass
if unit == 'lambda':
print('#########################')
print('Changed BB from Flam to Fnu')
snbb0= fbb0/ebb0
fbb = flamtonu(lmbb0, fbb0)
ebb = fbb/snbb0
else:
snbb0= fbb0/ebb0
fbb = fbb0
ebb = ebb0
else:
fbb = np.zeros(len(SFILT), dtype='float')
ebb = np.zeros(len(SFILT), dtype='float')
for ii in range(len(SFILT)):
fbb[ii] = 0
ebb[ii] = -99 #1000
# Dust flux;
if f_dust:
fdd = ascii.read(CAT_BB_DUST)
try:
id0 = fdd['id'].astype('str')
ii0 = np.where(id0[:]==ID)
try:
id = fd0['id'][ii0]
except:
print('Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(ID))
return False
except:
return False
id = fdd['id']
fbb_d = np.zeros(len(DFILT), dtype='float')
ebb_d = np.zeros(len(DFILT), dtype='float')
for ii in range(len(DFILT)):
fbb_d[ii] = fdd['F%s'%(DFILT[ii])][ii0]
ebb_d[ii] = fdd['E%s'%(DFILT[ii])][ii0]
#################
# Get morphology;
#################
if f_spec:
LSF, lm = get_LSF(inputs, DIR_EXTR, ID, lm)
else:
lm = []
####################################
# Start generating templates
####################################
col00 = []
col01 = []
col02 = []
for zz in range(len(Z)):
for pp in range(len(tau0)):
Zbest = Z[zz]
Na = len(age)
Ntmp = 1
age_univ= MB.cosmo.age(zbest).value #, use_flat=True, **cosmo)
if zz == 0 and pp == 0:
lm0 = spechdu['wavelength']
lmbest = np.zeros((Ntmp, len(lm0)), dtype='float')
fbest = np.zeros((Ntmp, len(lm0)), dtype='float')
lmbestbb = np.zeros((Ntmp, len(SFILT)), dtype='float')
fbestbb = np.zeros((Ntmp, len(SFILT)), dtype='float')
spec_mul = np.zeros((Na, len(lm0)), dtype='float')
spec_mul_nu = np.zeros((Na, len(lm0)), dtype='float')
spec_mul_nu_conv = np.zeros((Na, len(lm0)), dtype='float')
ftmpbb = np.zeros((Na, len(SFILT)), dtype='float')
ltmpbb = np.zeros((Na, len(SFILT)), dtype='float')
ftmp_nu_int = np.zeros((Na, len(lm)), dtype='float')
spec_av_tmp = np.zeros((Na, len(lm)), dtype='float')
ms = np.zeros(Na, dtype='float')
Ls = np.zeros(Na, dtype='float')
tau = np.zeros(Na, dtype='float')
sfr = np.zeros(Na, dtype='float')
ms[:] = mshdu['ms_'+str(zz)][:] # [:] is necessary.
Ls[:] = mshdu['Ls_'+str(zz)][:]
Fuv = np.zeros(Na, dtype='float')
for ss in range(Na):
wave = spechdu['wavelength']
if fneb == 1 and MB.f_bpass==0:
spec_mul[ss] = spechdu['efspec_'+str(zz)+'_'+str(ss)+'_'+str(pp)]
else:
spec_mul[ss] = spechdu['fspec_'+str(zz)+'_'+str(ss)+'_'+str(pp)]
###################
# IGM attenuation.
###################
f_IGM = True
if f_IGM:
spec_av_tmp = madau_igm_abs(wave, spec_mul[ss,:], zbest, cosmo=MB.cosmo)
else:
spec_av_tmp = spec_mul[ss,:]
spec_mul_nu[ss,:] = flamtonu(wave, spec_av_tmp)
# Distance;
DL = MB.cosmo.luminosity_distance(zbest).value * MB.Mpc_cm # Luminositydistance in cm
wavetmp = wave*(1.+zbest)
Lsun = 3.839 * 1e33 #erg s-1
spec_mul_nu[ss,:] *= Lsun/(4.*np.pi*DL**2/(1.+zbest))
spec_mul_nu[ss,:] *= (1./Ls[ss])*tmp_norm # in unit of erg/s/Hz/cm2/ms[ss].
ms[ss] *= (1./Ls[ss])*tmp_norm # M/L; 1 unit template has this mass in Msolar.
tautmp = af['ML']['realtau_%d'%int(zz)]
sfr[ss] = ms[ss] / (tautmp[ss]*1e9) # SFR per unit template, in units of Msolar/yr.
if f_spec:
ftmp_nu_int[ss,:] = data_int(lm, wavetmp, spec_mul_nu[ss,:])
ltmpbb[ss,:], ftmpbb[ss,:] = filconv(SFILT, wavetmp, spec_mul_nu[ss,:], DIR_FILT, MB=MB, f_regist=False)
# Convolution has to come after this?
if len(lm)>0:
try:
spec_mul_nu_conv[ss,:] = convolve(spec_mul_nu[ss], LSF, boundary='extend')
except:
spec_mul_nu_conv[ss,:] = spec_mul_nu[ss]
if zz==0 and ss==0:
print('Kernel is too small. No convolution.')
else:
spec_mul_nu_conv[ss,:] = spec_mul_nu[ss]
##########################################
# Writing out the templates to fits table.
##########################################
if ss == 0 and pp == 0 and zz == 0:
# First file
nd1 = np.arange(0,len(lm),1)
nd3 = np.arange(10000,10000+len(ltmpbb[ss,:]),1)
nd_ap = np.append(nd1,nd3)
lm_ap = np.append(lm, ltmpbb[ss,:])
col1 = fits.Column(name='wavelength', format='E', unit='AA', array=lm_ap)
col2 = fits.Column(name='colnum', format='K', unit='', array=nd_ap)
col00 = [col1, col2]
# ASDF
tree_spec.update({'wavelength':lm_ap})
tree_spec.update({'colnum':nd_ap})
# Second file
col3 = fits.Column(name='wavelength', format='E', unit='AA', array=wavetmp)
nd = np.arange(0,len(wavetmp),1)
col4 = fits.Column(name='colnum', format='K', unit='', array=nd)
col01 = [col3, col4]
# ASDF
tree_spec_full.update({'wavelength':wavetmp})
tree_spec_full.update({'colnum':nd})
spec_ap = np.append(ftmp_nu_int[ss,:], ftmpbb[ss,:])
# ASDF
tree_spec.update({'fspec_'+str(zz)+'_'+str(ss)+'_'+str(pp): spec_ap})
# ASDF
tree_spec_full.update({'fspec_orig_'+str(zz)+'_'+str(ss)+'_'+str(pp): spec_mul_nu[ss,:]})
tree_spec_full.update({'fspec_'+str(zz)+'_'+str(ss)+'_'+str(pp): spec_mul_nu_conv[ss,:]})
#########################
# Summarize the ML
#########################
if pp == 0:
# ML
colms = fits.Column(name='ML_'+str(zz), format='E', unit='Msun/%.1eLsun'%(tmp_norm), array=ms)
col02.append(colms)
tree_ML.update({'ML_'+str(zz): ms})
# SFR
colms = fits.Column(name='SFR_'+str(zz), format='E', unit='Msun/yr', array=sfr)
col02.append(colms)
tree_SFR.update({'SFR_'+str(zz): sfr})
#########################
# Summarize the templates
#########################
tree.update({'spec' : tree_spec})
tree.update({'spec_full' : tree_spec_full})
tree.update({'ML' : tree_ML})
tree.update({'SFR' : tree_SFR})
######################
# Add dust component;
######################
if f_dust:
tree_spec_dust = {}
tree_spec_dust_full = {}
if DT0 == DT1:
Temp = [DT0]
else:
Temp = np.arange(DT0,DT1,dDT)
dellam_d = 1e3
lambda_d = np.arange(1e3, 1e7, dellam_d)
'''
# c in AA/s.
kb = 1.380649e-23 # Boltzmann constant, in J/K
hp = 6.62607015e-34 # Planck constant, in J*s
# from Eq.3 of Bianchi 13
kabs0 = 4.0 # in cm2/g
beta_d= 2.08 #
lam0 = 250.*1e4 # mu m to AA
from astropy.modeling import models
from astropy import units as u
'''
print('Reading dust table...')
for tt in range(len(Temp)):
if tt == 0:
# For full;
nd_d = np.arange(0,len(lambda_d),1)
# ASDF
tree_spec_dust_full.update({'wavelength': lambda_d*(1.+zbest)})
tree_spec_dust_full.update({'colnum': nd_d})
'''
bb = models.BlackBody(temperature=Temp[tt]*u.K)
wav = lambda_d * u.AA
BT_nu = bb(wav) # erg/Hz/s/sr/cm2
kappa = kabs0 * (lam0/wav)**beta_d # cm2/g
# if optically thin;
#kappa = nu_d ** beta_d
fnu_d = (1+zbest)/DL**2 * kappa * BT_nu # 1/cm2 * cm2/g * erg/Hz/s/sr/cm2 = erg/s/cm^2/Hz/g/sr
fnu_d *= 1.989e+33 # erg/s/cm^2/Hz/Msun/sr; i.e. 1 flux is in 1Msun
'''
#numin, numax, nmodel = 8, 3, 9
numin, numax, nmodel = tt, MB.dust_numax, MB.dust_nmodel #3, 9
fnu_d = get_spectrum_draine(lambda_d, DL, zbest, numin, numax, nmodel, DIR_DUST=MB.DIR_DUST)
if False:
for nn in range(0,11,1):
try:
fnu_d_tmp = get_spectrum_draine(lambda_d, DL, zbest, numin, numax, nn, DIR_DUST=MB.DIR_DUST)
plt.plot(lambda_d * (1+zbest), fnu_d_tmp, label='%d'%nn)
plt.xlim(2000, 5000000)
plt.xscale('log')
plt.yscale('log')
except:
print('Errir in ',nn)
plt.legend()
plt.show()
# ASDF
tree_spec_dust_full.update({'fspec_'+str(tt): fnu_d})
# Convolution;
ALLFILT = np.append(SFILT,DFILT)
ltmpbb_d, ftmpbb_d = filconv(ALLFILT,lambda_d*(1.+zbest),fnu_d,DIR_FILT)
if f_spec:
ftmp_nu_int_d = data_int(lm, lambda_d*(1.+zbest), fnu_d)
ltmpbb_d = np.append(lm, ltmpbb_d)
ftmpbb_d = np.append(ftmp_nu_int_d, ftmpbb_d)
nd_db = np.arange(0, len(ftmpbb_d), 1)
if tt == 0:
# For conv;
col3 = fits.Column(name='wavelength', format='E', unit='AA', array=ltmpbb_d)
nd_db = np.arange(0,len(ltmpbb_d),1)
col4 = fits.Column(name='colnum', format='K', unit='', array=nd_db)
col04 = [col3, col4]
# ASDF
tree_spec_dust.update({'wavelength': ltmpbb_d})
tree_spec_dust.update({'colnum': nd_db})
tree_spec_dust.update({'fspec_'+str(tt): ftmpbb_d})
tree.update({'spec_dust' : tree_spec_dust})
tree.update({'spec_dust_full' : tree_spec_dust_full})
print('dust updated.')
# Save;
af = asdf.AsdfFile(tree)
af.write_to(DIR_TMP + 'spec_all_' + ID + '.asdf', all_array_compression='zlib')
# Re-register
MB.af = af
##########################################
# For observation.
# Write out for the Multi-component fitting.
##########################################
fw = open(DIR_TMP + 'spec_obs_' + ID + '.cat', 'w')
fw.write('# BB data (>%d) in this file are not used in fitting.\n'%(ncolbb))
for ii in range(len(lm)):
if fgrs[ii]==0: # G102
if lm[ii]/(1.+zbest) > lamliml and lm[ii]/(1.+zbest) < lamlimu:
fw.write('%d %.5f %.5e %.5e\n'%(ii, lm[ii], fobs[ii], eobs[ii]))
else:
fw.write('%d %.5f 0 1000\n'%(ii, lm[ii]))
elif fgrs[ii]==1: # G141
if lm[ii]/(1.+zbest) > lamliml and lm[ii]/(1.+zbest) < lamlimu:
fw.write('%d %.5f %.5e %.5e\n'%(ii+1000, lm[ii], fobs[ii], eobs[ii]))
else:
fw.write('%d %.5f 0 1000\n'%(ii+1000, lm[ii]))
for ii in range(len(ltmpbb[0,:])):
if SFILT[ii] in SKIPFILT:# data point to be skiped;
fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb, ltmpbb[0,ii], 0.0, fbb[ii]))
#fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb, ltmpbb[0,ii], 0.0, 1000))
elif ebb[ii]>ebblim:
fw.write('%d %.5f 0 1000\n'%(ii+ncolbb, ltmpbb[0,ii]))
else:
fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb, ltmpbb[0,ii], fbb[ii], ebb[ii]))
fw.close()
fw = open(DIR_TMP + 'spec_dust_obs_' + ID + '.cat', 'w')
if f_dust:
nbblast = len(ltmpbb[0,:])+len(lm)
for ii in range(len(ebb_d[:])):
if ebb_d[ii]>ebblim:
fw.write('%d %.5f 0 1000\n'%(ii+ncolbb+nbblast, ltmpbb_d[ii+nbblast]))
else:
fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb+nbblast, ltmpbb_d[ii+nbblast], fbb_d[ii], ebb_d[ii]))
fw.close()
# BB phot
fw = open(DIR_TMP + 'bb_obs_' + ID + '.cat', 'w')
fw_rem = open(DIR_TMP + 'bb_obs_' + ID + '_removed.cat', 'w')
for ii in range(len(ltmpbb[0,:])):
if SFILT[ii] in SKIPFILT:# data point to be skiped;
fw.write('%d %.5f %.5e %.5e %.1f\n'%(ii+ncolbb, ltmpbb[0,ii], 0.0, fbb[ii], FWFILT[ii]/2.))
fw_rem.write('%d %.5f %.5e %.5e %.1f\n'%(ii+ncolbb, ltmpbb[0,ii], fbb[ii], ebb[ii], FWFILT[ii]/2.))
elif ebb[ii]>ebblim:
fw.write('%d %.5f 0 1000 %.1f\n'%(ii+ncolbb, ltmpbb[0,ii], FWFILT[ii]/2.))
elif ebb[ii]<=0:
fw.write('%d %.5f 0 -99 %.1f\n'%(ii+ncolbb, ltmpbb[0,ii], FWFILT[ii]/2.))
else:
fw.write('%d %.5f %.5e %.5e %.1f\n'%(ii+ncolbb, ltmpbb[0,ii], fbb[ii], ebb[ii], FWFILT[ii]/2.))
fw.close()
fw_rem.close()
# Dust
fw = open(DIR_TMP + 'bb_dust_obs_' + ID + '.cat', 'w')
if f_dust:
for ii in range(len(ebb_d[:])):
if ebb_d[ii]>ebblim:
fw.write('%d %.5f 0 1000 %.1f\n'%(ii+ncolbb+nbblast, ltmpbb_d[ii+nbblast], DFWFILT[ii]/2.))
else:
fw.write('%d %.5f %.5e %.5e %.1f\n'%(ii+ncolbb+nbblast, ltmpbb_d[ii+nbblast], fbb_d[ii], ebb_d[ii], DFWFILT[ii]/2.))
fw.close()
print('Done making templates at z=%.2f.\n'%zbest)
return True
def maketemp_tau(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, tau_lim=0.001, f_IGM=True, nthin=1, tmp_norm=1e10):
'''
Make SPECTRA at given z and filter set.
Parameters
----------
inputs : str
Configuration file.
zbest :float
Best redshift at this iteration. Templates are generated based on this reshift.
Z : array
Stellar phase metallicity in logZsun.
age : array
Age, in Gyr.
fneb : int
flag for adding nebular emissionself.
f_IGM : bool
IGM attenuation. Madau.
nthin : int
Thinning templates.
'''
inputs = MB.inputs
ID = MB.ID
age = MB.ageparam
nage = MB.nage
tau = MB.tau
Z = MB.Zall
fneb = MB.fneb
DIR_TMP = MB.DIR_TMP
zbest = MB.zgal
af = asdf.open(DIR_TMP + 'spec_all.asdf')
mshdu = af['ML']
spechdu = af['spec']
# Consistency check:
flag = check_library(MB, af)
if not flag:
print('\n!!!\nThere is inconsistency in z0 library and input file. Exiting.\n!!!\n')
sys.exit()
# ASDF Big tree;
# Create header;
tree = {
'isochrone': af['isochrone'],
'library': af['library'],
'nimf': af['nimf'],
'version_gsf': af['version_gsf']
}
tree_spec = {}
tree_spec_full = {}
tree_ML = {}
try:
DIR_EXTR = MB.DIR_EXTR #inputs['DIR_EXTR']
if len(DIR_EXTR)==0:
DIR_EXTR = False
except:
DIR_EXTR = False
DIR_FILT = MB.DIR_FILT #inputs['DIR_FILT']
try:
CAT_BB_IND = inputs['CAT_BB_IND']
except:
CAT_BB_IND = False
try:
CAT_BB = MB.CAT_BB #inputs['CAT_BB']
except:
CAT_BB = False
try:
SFILT = MB.filts #inputs['FILTER'] # filter band string.
FWFILT = fil_fwhm(SFILT, DIR_FILT)
except:
print('########################')
print('Filter is not detected!!')
print('Make sure your \nfilter directory is correct.')
print('########################')
sys.exit()
try:
SKIPFILT = inputs['SKIPFILT']
SKIPFILT = [x.strip() for x in SKIPFILT.split(',')]
except:
SKIPFILT = []
# If FIR data;
try:
DFILT = inputs['FIR_FILTER'] # filter band string.
DFILT = [x.strip() for x in DFILT.split(',')]
DFWFILT = fil_fwhm(DFILT, DIR_FILT)
CAT_BB_DUST = inputs['CAT_BB_DUST']
DT0 = float(inputs['TDUST_LOW'])
DT1 = float(inputs['TDUST_HIG'])
dDT = float(inputs['TDUST_DEL'])
f_dust = True
print('FIR is implemented.\n')
except:
print('No FIR is implemented.\n')
f_dust = False
pass
print('############################')
print('Making templates at z=%.4f'%(zbest))
print('############################')
####################################################
# Get extracted spectra.
####################################################
f_spec = False
try:
spec_files = inputs['SPEC_FILE'] #.replace('$ID','%s'%(ID))
spec_files = [x.strip() for x in spec_files.split(',')]
ninp0 = np.zeros(len(spec_files), dtype='int')
for ff, spec_file in enumerate(spec_files):
try:
fd0 = np.loadtxt(DIR_EXTR + spec_file, comments='#')
lm0tmp= fd0[:,0]
fobs0 = fd0[:,1]
eobs0 = fd0[:,2]
ninp0[ff] = len(lm0tmp)#[con_tmp])
except Exception:
print('File, %s/%s, cannot be open.'%(DIR_EXTR,spec_file))
pass
# Constructing arrays.
lm = np.zeros(np.sum(ninp0[:]),dtype='float')
fobs = np.zeros(np.sum(ninp0[:]),dtype='float')
eobs = np.zeros(np.sum(ninp0[:]),dtype='float')
fgrs = np.zeros(np.sum(ninp0[:]),dtype='int') # FLAG for G102/G141.
for ff, spec_file in enumerate(spec_files):
try:
fd0 = np.loadtxt(DIR_EXTR + spec_file, comments='#')
lm0tmp= fd0[:,0]
fobs0 = fd0[:,1]
eobs0 = fd0[:,2]
for ii1 in range(ninp0[ff]):
if ff==0:
ii = ii1
else:
ii = ii1 + np.sum(ninp0[:ff])
fgrs[ii] = ff
lm[ii] = lm0tmp[ii1]
fobs[ii] = fobs0[ii1]
eobs[ii] = eobs0[ii1]
f_spec = True
except Exception:
pass
except:
print('No spec file is provided.')
pass
if f_spec:
nthin = 1
#############################
# READ BB photometry from CAT_BB:
#############################
if CAT_BB:
fd0 = ascii.read(CAT_BB)
id0 = fd0['id'].astype('str')
ii0 = np.where(id0[:]==ID)
try:
id = fd0['id'][ii0]
except:
print('Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(ID))
return False
if len(ii0) == 0:
print('Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(ID))
return False
fbb = np.zeros(len(SFILT), dtype='float')
ebb = np.zeros(len(SFILT), dtype='float')
for ii in range(len(SFILT)):
fbb[ii] = fd0['F%s'%(SFILT[ii])][ii0]
ebb[ii] = fd0['E%s'%(SFILT[ii])][ii0]
elif CAT_BB_IND: # if individual photometric catalog; made in get_sdss.py
fd0 = fits.open(DIR_EXTR + CAT_BB_IND)
hd0 = fd0[1].header
bunit_bb = float(hd0['bunit'][:5])
lmbb0= fd0[1].data['wavelength']
fbb0 = fd0[1].data['flux'] * bunit_bb
ebb0 = 1/np.sqrt(fd0[1].data['inverse_variance']) * bunit_bb
unit = 'nu'
try:
unit = inputs['UNIT_SPEC']
except:
print('No param for UNIT_SPEC is found.')
print('BB flux unit is assumed to Fnu.')
pass
if unit == 'lambda':
print('#########################')
print('Changed BB from Flam to Fnu')
snbb0 = fbb0/ebb0
fbb = flamtonu(lmbb0, fbb0)
ebb = fbb/snbb0
else:
snbb0 = fbb0/ebb0
fbb = fbb0
ebb = ebb0
else:
fbb = np.zeros(len(SFILT), dtype='float')
ebb = np.zeros(len(SFILT), dtype='float')
for ii in range(len(SFILT)):
fbb[ii] = 0
ebb[ii] = -99 #1000
# Dust flux;
if f_dust:
fdd = ascii.read(CAT_BB_DUST)
id0 = fdd['id'].astype('str')
ii0 = np.where(id0[:]==ID)
try:
id = fd0['id'][ii0]
except:
print('Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(ID))
return False
fbb_d = np.zeros(len(DFILT), dtype='float')
ebb_d = np.zeros(len(DFILT), dtype='float')
for ii in range(len(DFILT)):
fbb_d[ii] = fdd['F%s'%(DFILT[ii])][ii0]
ebb_d[ii] = fdd['E%s'%(DFILT[ii])][ii0]
#############################
# Getting Morphology params.
#############################
Amp = 0
f_morp = False
if f_spec:
try:
if inputs['MORP'] == 'moffat' or inputs['MORP'] == 'gauss':
f_morp = True
try:
mor_file = inputs['MORP_FILE'].replace('$ID','%s'%(ID))
#fm = np.loadtxt(DIR_EXTR + mor_file, comments='#')
fm = ascii.read(DIR_EXTR + mor_file)
Amp = fm['A']
gamma = fm['gamma']
if inputs['MORP'] == 'moffat':
alp = fm['alp']
else:
alp = 0
except Exception:
print('Error in reading morphology params.')
print('No morphology convolution.')
pass
else:
print('MORP Keywords does not match.')
print('No morphology convolution.')
except:
pass
############################
# Template convolution;
############################
try:
sig_temp = float(inputs['SIG_TEMP'])
except:
sig_temp = 50.
print('Template resolution is unknown.')
print('Set to %.1f km/s.'%(sig_temp))
dellam = lm[1] - lm[0] # AA/pix
R_temp = c/(sig_temp*1e3*1e10)
sig_temp_pix = np.median(lm) / R_temp / dellam # delta v in pixel;
#
sig_inst = 0 #65 #km/s for Manga
# If grism;
if f_morp:
print('Templates convolution (intrinsic morphology).')
if gamma>sig_temp_pix:
sig_conv = np.sqrt(gamma**2-sig_temp_pix**2)
else:
sig_conv = 0
print('Template resolution is broader than Morphology.')
print('No convolution is applied to templates.')
xMof = np.arange(-5, 5.1, .1) # dimension must be even.
if inputs['MORP'] == 'moffat' and Amp>0 and alp>0:
LSF = moffat(xMof, Amp, 0, np.sqrt(gamma**2-sig_temp_pix**2), alp)
print('Template convolution with Moffat.')
elif inputs['MORP'] == 'gauss':
sigma = gamma
LSF = gauss(xMof, Amp, np.sqrt(sigma**2-sig_temp_pix**2))
print('Template convolution with Gaussian.')
print('params is sigma;',sigma)
else:
print('Something is wrong with the convolution file. Exiting.')
return False
else: # For slit spectroscopy. To be updated...
print('Templates convolution (intrinsic velocity).')
try:
vdisp = float(inputs['VDISP'])
dellam = lm[1] - lm[0] # AA/pix
#R_disp = c/(vdisp*1e3*1e10)
R_disp = c/(np.sqrt(vdisp**2-sig_inst**2)*1e3*1e10)
vdisp_pix = np.median(lm) / R_disp / dellam # delta v in pixel;
print('Templates are convolved at %.2f km/s.'%(vdisp))
if vdisp_pix-sig_temp_pix>0:
sig_conv = np.sqrt(vdisp_pix**2-sig_temp_pix**2)
else:
sig_conv = 0
except:
vdisp = 0.
print('Templates are not convolved.')
sig_conv = 0 #np.sqrt(sig_temp_pix**2)
pass
xMof = np.arange(-5, 5.1, .1) # dimension must be even.
Amp = 1.
LSF = gauss(xMof, Amp, sig_conv)
else:
lm = []
####################################
# Start generating templates
####################################
col00 = []
col01 = []
col02 = []
for zz in range(len(Z)):
Zbest = Z[zz]
Na = len(age)
Ntmp = 1
age_univ= MB.cosmo.age(zbest).value #, use_flat=True, **cosmo)
for tt in range(len(tau)): # tau
if zz == 0 and tt == 0:
lm0 = spechdu['wavelength'][::nthin]
wave = lm0
lmbest = np.zeros((Ntmp, len(lm0)), dtype='float')
fbest = np.zeros((Ntmp, len(lm0)), dtype='float')
lmbestbb = np.zeros((Ntmp, len(SFILT)), dtype='float')
fbestbb = np.zeros((Ntmp, len(SFILT)), dtype='float')
#A = np.zeros(Na, dtype='float') + 1
spec_mul = np.zeros((Na, len(lm0)), dtype='float')
spec_mul_nu = np.zeros((Na, len(lm0)), dtype='float')
spec_mul_nu_conv = np.zeros((Na, len(lm0)), dtype='float')
ftmpbb = np.zeros((Na, len(SFILT)), dtype='float')
ltmpbb = np.zeros((Na, len(SFILT)), dtype='float')
ftmp_nu_int = np.zeros((Na, len(lm)), dtype='float')
spec_av_tmp = np.zeros((Na, len(lm)), dtype='float')
ms = np.zeros(Na, dtype='float')
Ls = np.zeros(Na, dtype='float')
ms[:] = mshdu['ms_'+str(zz)+'_'+str(tt)][:] # [:] is necessary.
Ls[:] = mshdu['Ls_'+str(zz)+'_'+str(tt)][:]
Fuv = np.zeros(Na, dtype='float')
for ss in range(Na):
#print(ss,tt,zz)
if ss == 0 and tt == 0 and zz == 0:
DL = MB.cosmo.luminosity_distance(zbest).value * MB.Mpc_cm # Luminositydistance in cm
wavetmp = wave*(1.+zbest)
Lsun = 3.839 * 1e33 #erg s-1
if fneb == 1:
spec_mul[ss] = spechdu['efspec_'+str(zz)+'_'+str(tt)+'_'+str(ss)][::nthin]
else:
spec_mul[ss] = spechdu['fspec_'+str(zz)+'_'+str(tt)+'_'+str(ss)][::nthin]
##################
# IGM attenuation.
##################
if f_IGM:
spec_av_tmp = madau_igm_abs(wave, spec_mul[ss,:], zbest, cosmo=MB.cosmo)
else:
spec_av_tmp = spec_mul[ss,:]
spec_mul_nu[ss,:] = flamtonu(wave, spec_av_tmp)
if len(lm)>0:
try:
spec_mul_nu_conv[ss,:] = convolve(spec_mul_nu[ss], LSF, boundary='extend')
except:
spec_mul_nu_conv[ss,:] = spec_mul_nu[ss]
if zz==0 and ss==0:
print('Kernel is too small. No convolution.')
else:
spec_mul_nu_conv[ss,:] = spec_mul_nu[ss]
spec_mul_nu_conv[ss,:] *= Lsun/(4.*np.pi*DL**2/(1.+zbest))
spec_mul_nu_conv[ss,:] *= (1./Ls[ss])*tmp_norm # in unit of erg/s/Hz/cm2/ms[ss].
ms[ss] *= (1./Ls[ss])*tmp_norm # M/L; 1 unit template has this mass in [Msolar].
if f_spec:
ftmp_nu_int[ss,:] = data_int(lm, wavetmp, spec_mul_nu_conv[ss,:])
# Register filter response;
#if ss == 0 and tt == 0 and zz == 0:
# filconv(SFILT, wavetmp, spec_mul_nu_conv[ss,:], DIR_FILT, fw=True, MB=MB, f_regist=True)
ltmpbb[ss,:], ftmpbb[ss,:] = filconv(SFILT, wavetmp, spec_mul_nu_conv[ss,:], DIR_FILT, MB=MB, f_regist=False)
##########################################
# Writing out the templates to fits table.
##########################################
if ss == 0 and tt == 0 and zz == 0:
# First file
nd1 = np.arange(0,len(lm),1)
nd3 = np.arange(10000,10000+len(ltmpbb[ss,:]),1)
nd_ap = np.append(nd1,nd3)
lm_ap = np.append(lm, ltmpbb[ss,:])
col1 = fits.Column(name='wavelength', format='E', unit='AA', array=lm_ap)
col2 = fits.Column(name='colnum', format='K', unit='', array=nd_ap)
col00 = [col1, col2]
# ASDF
tree_spec.update({'wavelength':lm_ap})
tree_spec.update({'colnum':nd_ap})
# Second file
col3 = fits.Column(name='wavelength', format='E', unit='AA', array=wavetmp)
nd = np.arange(0,len(wavetmp),1)
col4 = fits.Column(name='colnum', format='K', unit='', array=nd)
col01 = [col3, col4]
# ASDF
tree_spec_full.update({'wavelength':wavetmp})
tree_spec_full.update({'colnum':nd})
# ASDF
spec_ap = np.append(ftmp_nu_int[ss,:], ftmpbb[ss,:])
tree_spec.update({'fspec_'+str(zz)+'_'+str(tt)+'_'+str(ss): spec_ap})
tree_spec_full.update({'fspec_'+str(zz)+'_'+str(tt)+'_'+str(ss): spec_mul_nu_conv[ss,:]})
#########################
# Summarize the ML
#########################
# ASDF
tree_ML.update({'ML_'+str(zz)+'_'+str(tt): ms})
#########################
# Summarize the templates
#########################
tree.update({'spec' : tree_spec})
tree.update({'spec_full' : tree_spec_full})
tree.update({'ML' : tree_ML})
######################
# Add dust component;
######################
if f_dust:
tree_spec_dust = {}
tree_spec_dust_full = {}
if DT0 == DT1:
Temp = [DT0]
else:
Temp = np.arange(DT0,DT1,dDT)
dellam_d = 1e3
lambda_d = np.arange(1e3, 1e7, dellam_d) # RF wavelength, in AA. #* (1.+zbest) # 1um to 1000um; This has to be wide enough, to cut dust contribution at <1um.
print('Reading dust table...')
for tt in range(len(Temp)):
if tt == 0:
# For full;
nd_d = np.arange(0,len(lambda_d),1)
# ASDF
tree_spec_dust_full.update({'wavelength': lambda_d*(1.+zbest)})
tree_spec_dust_full.update({'colnum': nd_d})
#numin, numax, nmodel = 8, 3, 9
numin, numax, nmodel = tt, 3, 9
fnu_d = get_spectrum_draine(lambda_d, DL, zbest, numin, numax, nmodel, DIR_DUST=MB.DIR_DUST)
if False:
for nn in range(0,11,1):
try:
fnu_d_tmp = get_spectrum_draine(lambda_d, DL, zbest, numin, numax, nn, DIR_DUST=MB.DIR_DUST)
plt.plot(lambda_d * (1+zbest), fnu_d_tmp, label='%d'%nn)
plt.xlim(2000, 5000000)
plt.xscale('log')
plt.yscale('log')
except:
print('Errir in ',nn)
plt.legend()
plt.show()
# ASDF
tree_spec_dust_full.update({'fspec_'+str(tt): fnu_d})
# Convolution;
ALLFILT = np.append(SFILT,DFILT)
ltmpbb_d, ftmpbb_d = filconv(ALLFILT,lambda_d*(1.+zbest),fnu_d,DIR_FILT)
if f_spec:
ftmp_nu_int_d = data_int(lm, lambda_d*(1.+zbest), fnu_d)
ltmpbb_d = np.append(lm, ltmpbb_d)
ftmpbb_d = np.append(ftmp_nu_int_d, ftmpbb_d)
nd_db = np.arange(0, len(ftmpbb_d), 1)
if tt == 0:
# For conv;
col3 = fits.Column(name='wavelength', format='E', unit='AA', array=ltmpbb_d)
nd_db = np.arange(0,len(ltmpbb_d),1)
col4 = fits.Column(name='colnum', format='K', unit='', array=nd_db)
col04 = [col3, col4]
# ASDF
tree_spec_dust.update({'wavelength': ltmpbb_d})
tree_spec_dust.update({'colnum': nd_db})
tree_spec_dust.update({'fspec_'+str(tt): ftmpbb_d})
tree.update({'spec_dust' : tree_spec_dust})
tree.update({'spec_dust_full' : tree_spec_dust_full})
print('dust updated.')
# Save;
af = asdf.AsdfFile(tree)
af.write_to(DIR_TMP + 'spec_all_' + ID + '.asdf', all_array_compression='zlib')
# Re-register
MB.af = af
##########################################
# For observation.
# Write out for the Multi-component fitting.
##########################################
fw = open(DIR_TMP + 'spec_obs_' + ID + '.cat', 'w')
fw.write('# BB data (>%d) in this file are not used in fitting.\n'%(ncolbb))
for ii in range(len(lm)):
if fgrs[ii]==0: # G102
if lm[ii]/(1.+zbest) > lamliml and lm[ii]/(1.+zbest) < lamlimu:
fw.write('%d %.5f %.5e %.5e\n'%(ii, lm[ii], fobs[ii], eobs[ii]))
else:
fw.write('%d %.5f 0 1000\n'%(ii, lm[ii]))
elif fgrs[ii]==1: # G141
if lm[ii]/(1.+zbest) > lamliml and lm[ii]/(1.+zbest) < lamlimu:
fw.write('%d %.5f %.5e %.5e\n'%(ii+1000, lm[ii], fobs[ii], eobs[ii]))
else:
fw.write('%d %.5f 0 1000\n'%(ii+1000, lm[ii]))
for ii in range(len(ltmpbb[0,:])):
if SFILT[ii] in SKIPFILT:# data point to be skiped;
fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb, ltmpbb[0,ii], 0.0, fbb[ii]))
#fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb, ltmpbb[0,ii], 0.0, 1000))
elif ebb[ii]>ebblim:
fw.write('%d %.5f 0 1000\n'%(ii+ncolbb, ltmpbb[0,ii]))
else:
fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb, ltmpbb[0,ii], fbb[ii], ebb[ii]))
fw.close()
fw = open(DIR_TMP + 'spec_dust_obs_' + ID + '.cat', 'w')
if f_dust:
nbblast = len(ltmpbb[0,:])+len(lm)
for ii in range(len(ebb_d[:])):
if ebb_d[ii]>ebblim:
fw.write('%d %.5f 0 1000\n'%(ii+ncolbb+nbblast, ltmpbb_d[ii+nbblast]))
else:
fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb+nbblast, ltmpbb_d[ii+nbblast], fbb_d[ii], ebb_d[ii]))
fw.close()
# BB phot
fw = open(DIR_TMP + 'bb_obs_' + ID + '.cat', 'w')
fw_rem = open(DIR_TMP + 'bb_obs_' + ID + '_removed.cat', 'w')
for ii in range(len(ltmpbb[0,:])):
if SFILT[ii] in SKIPFILT:# data point to be skiped;
fw.write('%d %.5f %.5e %.5e %.1f\n'%(ii+ncolbb, ltmpbb[0,ii], 0.0, fbb[ii], FWFILT[ii]/2.))
fw_rem.write('%d %.5f %.5e %.5e %.1f\n'%(ii+ncolbb, ltmpbb[0,ii], fbb[ii], ebb[ii], FWFILT[ii]/2.))
elif ebb[ii]>ebblim:
fw.write('%d %.5f 0 1000 %.1f\n'%(ii+ncolbb, ltmpbb[0,ii], FWFILT[ii]/2.))
elif ebb[ii]<=0:
fw.write('%d %.5f 0 -99 %.1f\n'%(ii+ncolbb, ltmpbb[0,ii], FWFILT[ii]/2.))
else:
fw.write('%d %.5f %.5e %.5e %.1f\n'%(ii+ncolbb, ltmpbb[0,ii], fbb[ii], ebb[ii], FWFILT[ii]/2.))
fw.close()
fw_rem.close()
# Dust
fw = open(DIR_TMP + 'bb_dust_obs_' + ID + '.cat', 'w')
if f_dust:
for ii in range(len(ebb_d[:])):
if ebb_d[ii]>ebblim:
fw.write('%d %.5f 0 1000 %.1f\n'%(ii+ncolbb+nbblast, ltmpbb_d[ii+nbblast], DFWFILT[ii]/2.))
else:
fw.write('%d %.5f %.5e %.5e %.1f\n'%(ii+ncolbb+nbblast, ltmpbb_d[ii+nbblast], fbb_d[ii], ebb_d[ii], DFWFILT[ii]/2.))
fw.close()
print('Done making templates at z=%.2f.\n'%zbest)
return True
| 35.872765
| 165
| 0.478901
| 6,743
| 52,159
| 3.582085
| 0.093727
| 0.01536
| 0.011261
| 0.013248
| 0.837998
| 0.829469
| 0.820982
| 0.806409
| 0.793409
| 0.78132
| 0
| 0.036116
| 0.341744
| 52,159
| 1,453
| 166
| 35.897454
| 0.667385
| 0.106923
| 0
| 0.843157
| 0
| 0
| 0.125111
| 0.005886
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005994
| false
| 0.016983
| 0.013986
| 0
| 0.035964
| 0.087912
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb76ded19f03a96147319cbf6fa0c4e44601b326
| 2,157
|
py
|
Python
|
RFEM/Reports/partsList.py
|
Dlubal-Software/RFEM_Python_Client
|
9e29c598dadf380d49677c463931f0be659ccc40
|
[
"MIT"
] | 16
|
2021-10-13T21:00:11.000Z
|
2022-03-21T11:12:09.000Z
|
RFEM/Reports/partsList.py
|
Dlubal-Software/RFEM_Python_Client
|
9e29c598dadf380d49677c463931f0be659ccc40
|
[
"MIT"
] | 49
|
2021-10-19T13:18:51.000Z
|
2022-03-30T08:20:17.000Z
|
RFEM/Reports/partsList.py
|
Dlubal-Software/RFEM_Python_Client
|
9e29c598dadf380d49677c463931f0be659ccc40
|
[
"MIT"
] | 7
|
2021-10-13T06:06:24.000Z
|
2022-03-29T17:48:39.000Z
|
from RFEM.initModel import Model
def GetPartsListAllByMaterial(model = Model):
'''
Returns Parts List All By Material
'''
try:
return model.clientModel.service.get_parts_list_all_by_material()
except:
model.clientModel.service.generate_parts_lists()
return model.clientModel.service.get_parts_list_all_by_material()
def GetPartsListMemberRepresentativesByMaterial(model = Model):
'''
Returns Parts List Member Representatives By Material
'''
try:
return model.clientModel.service.get_parts_list_member_representatives_by_material()
except:
model.clientModel.service.generate_parts_lists()
return model.clientModel.service.get_parts_list_member_representatives_by_material()
def GetPartsListMemberSetsByMaterial(model = Model):
'''
Returns Parts List Member Sets By Material
'''
try:
return model.clientModel.service.get_parts_list_member_sets_by_material()
except:
model.clientModel.service.generate_parts_lists()
return model.clientModel.service.get_parts_list_member_sets_by_material()
def GetPartsListMembersByMaterial(model = Model):
'''
Returns Parts List Members By Material
'''
try:
return model.clientModel.service.get_parts_list_members_by_material()
except:
model.clientModel.service.generate_parts_lists()
return model.clientModel.service.get_parts_list_members_by_material()
def GetPartsListSolidsByMaterial(model = Model):
'''
Returns Parts List Solids By Material
'''
try:
return model.clientModel.service.get_parts_list_solids_by_material()
except:
model.clientModel.service.generate_parts_lists()
return model.clientModel.service.get_parts_list_solids_by_material()
def GetPartsListSurfacessByMaterial(model = Model):
'''
Returns Parts List Surfaces By Material
'''
try:
return model.clientModel.service.get_parts_list_surfaces_by_material()
except:
model.clientModel.service.generate_parts_lists()
return model.clientModel.service.get_parts_list_surfaces_by_material()
| 34.238095
| 92
| 0.743162
| 239
| 2,157
| 6.389121
| 0.133891
| 0.10609
| 0.27112
| 0.227898
| 0.846758
| 0.762934
| 0.708579
| 0.708579
| 0.708579
| 0.708579
| 0
| 0
| 0.18127
| 2,157
| 62
| 93
| 34.790323
| 0.864666
| 0.114975
| 0
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162162
| false
| 0
| 0.027027
| 0
| 0.513514
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
cb7b0d1df4623810d5039e8bedd283832bc809aa
| 138
|
py
|
Python
|
src/IsingRegisterAllocator/util/get_qubo/__init__.py
|
kumagaimasahito/IsingRegisterAllocator
|
7d20f56ee035fcaff456ab7641e51bad4b68144f
|
[
"MIT"
] | 1
|
2021-05-04T06:56:42.000Z
|
2021-05-04T06:56:42.000Z
|
src/IsingRegisterAllocator/util/get_qubo/__init__.py
|
kumagaimasahito/IsingRegisterAllocator
|
7d20f56ee035fcaff456ab7641e51bad4b68144f
|
[
"MIT"
] | 1
|
2021-03-31T14:56:27.000Z
|
2021-03-31T14:56:27.000Z
|
src/IsingRegisterAllocator/util/get_qubo/__init__.py
|
kumagaimasahito/IsingRegisterAllocator
|
7d20f56ee035fcaff456ab7641e51bad4b68144f
|
[
"MIT"
] | null | null | null |
from .by_amplify import by_amplify
from .by_amplify_splitted import by_amplify_splitted
from .by_amplify_limited import by_amplify_limited
| 46
| 52
| 0.898551
| 22
| 138
| 5.181818
| 0.272727
| 0.473684
| 0.342105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07971
| 138
| 3
| 53
| 46
| 0.897638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cb89a485a1eb22a9559a439b9cc92034964417c0
| 25,191
|
py
|
Python
|
model/vcr_caption.py
|
MyLittleChange/SEITU
|
00521367542d09630097a0c6573a23d86d452a34
|
[
"MIT"
] | 2
|
2021-06-28T09:10:31.000Z
|
2021-11-25T11:09:19.000Z
|
model/vcr_caption.py
|
MyLittleChange/SEITU
|
00521367542d09630097a0c6573a23d86d452a34
|
[
"MIT"
] | 1
|
2021-12-01T13:04:03.000Z
|
2021-12-04T12:11:12.000Z
|
model/vcr_caption.py
|
MyLittleChange/SEITU
|
00521367542d09630097a0c6573a23d86d452a34
|
[
"MIT"
] | 1
|
2021-06-08T14:51:04.000Z
|
2021-06-08T14:51:04.000Z
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for VCR model
"""
import torch
from transformers.utils import logging
from torch.nn import functional as F
from torch.nn.init import xavier_normal_
logger = logging.get_logger(__name__)
from collections import defaultdict
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .model import (
UniterPreTrainedModel, UniterModel)
import numpy as np
class UniterForVisualCommonsenseReasoning(UniterPreTrainedModel):
""" Finetune UNITER for VCR
"""
def __init__(self, config, img_dim):
super().__init__(config, img_dim)
self.uniter = UniterModel(config, img_dim)
self.vcr_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
nn.ReLU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, 2)
)
self.apply(self.init_weights)
self.criterion = torch.nn.CrossEntropyLoss(reduction='mean')
def init_type_embedding(self):
new_emb = nn.Embedding(4, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :]
new_emb.weight.data[2, :].copy_(emb)
new_emb.weight.data[3, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def init_word_embedding(self, num_special_tokens):
orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0)
new_emb = nn.Embedding(
orig_word_num + num_special_tokens, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
emb = self.uniter.embeddings.word_embeddings.weight.data
new_emb.weight.data[:orig_word_num, :].copy_(emb)
self.uniter.embeddings.word_embeddings = new_emb
def forward(self, batch, compute_loss=False):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
txt_type_ids = batch['txt_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
pooled_output = self.uniter.pooler(sequence_output)
rank_scores = self.vcr_output(pooled_output)
targets = batch['a_targets']
loss = F.cross_entropy(
rank_scores, targets.squeeze(-1),
reduction='mean')
rank_scores=rank_scores[:,1:]
out=rank_scores.view(rank_scores.shape[0]//4,-1)
tar=targets.view(targets.shape[0]//4,-1)
output=out.max(dim=-1)[1]
ans=np.nonzero(tar)[:,1]
matched_qa = output == ans
return rank_scores,loss,matched_qa
class UniterForVisualCommonsenseReasoning_match(UniterPreTrainedModel):
""" Finetune UNITER for VCR
"""
def __init__(self, config, img_dim):
super().__init__(config, img_dim)
self.uniter = UniterModel(config, img_dim)
self.hidden_size=config.hidden_size
self.vcr_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
nn.ReLU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, 2)
)
self.vcr_output_match = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size * 2),
nn.ReLU(),
LayerNorm(config.hidden_size * 2, eps=1e-12),
nn.Linear(config.hidden_size * 2, 4)
)
self.dense_avg = nn.Linear(config.hidden_size*2, config.hidden_size)
self.activation = nn.Tanh()
self.apply(self.init_weights)
# self.criterion = torch.nn.CrossEntropyLoss(reduction='mean')
def init_type_embedding(self):
new_emb = nn.Embedding(4, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :]
new_emb.weight.data[2, :].copy_(emb)
new_emb.weight.data[3, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def init_word_embedding(self, num_special_tokens):
orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0)
new_emb = nn.Embedding(
orig_word_num + num_special_tokens, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
emb = self.uniter.embeddings.word_embeddings.weight.data
new_emb.weight.data[:orig_word_num, :].copy_(emb)
self.uniter.embeddings.word_embeddings = new_emb
def forward(self, batch, compute_loss=False):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
txt_type_ids = batch['txt_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
pooled_output = self.uniter.pooler(sequence_output)
rank_scores = self.vcr_output(pooled_output)
targets = batch['a_targets']
cls_mask=targets!=2
#2用来区分是不是做match
cls_targets = torch.masked_select(targets, cls_mask)
cls_mask_score=cls_mask.expand(cls_mask.shape[0],2)
cls_rank_scores=torch.masked_select(rank_scores, cls_mask_score).reshape(-1,2)
loss = F.cross_entropy(
cls_rank_scores, cls_targets,
reduction='mean')
cls_rank_scores=cls_rank_scores[:,1:]
out=cls_rank_scores.view(cls_rank_scores.shape[0]//4,-1)
tar=cls_targets.view(cls_targets.shape[0]//4,-1)
output=out.max(dim=-1)[1]
ans=np.nonzero(tar)[:,1]
matched_qa = output == ans
match_iter=batch['match_iter']
match_iter=match_iter[::5,:]
#重复了5遍
ans_mask=batch['ans_mask'][4::5,:]
match_pooler=self.match_pooler(sequence_output[4::5,:],ans_mask)
match_scores=self.vcr_output_match(match_pooler)
match_loss=F.cross_entropy(match_scores.reshape(match_scores.shape[0]*match_scores.shape[1],-1),match_iter.reshape(-1),reduction='mean')
return rank_scores,loss,match_loss,matched_qa
def match_pooler(self,sequence_output,ans_mask):
#输入是bs*4,需要变换为坐标点
first_token_tensor = sequence_output[:, 0]
q_num=sequence_output.shape[0]
pad = torch.zeros((q_num, sequence_output.shape[1] - ans_mask.shape[1]), dtype=torch.int64).cuda()
ans_mask = torch.cat((ans_mask, pad), dim=1)
ans_mask = ans_mask.unsqueeze(-1).expand((ans_mask.shape[0], ans_mask.shape[1], sequence_output.shape[-1]))
ans_tensor = []
for i in range(1, 5):
mask = ans_mask == i
ans_token = torch.masked_select(sequence_output, mask)
ans_token = ans_token.view(-1, sequence_output.shape[-1])
ans_len = mask[:, :, 0].sum(dim=1)
cur_len = 0
ans_mean = torch.zeros((q_num, ans_token.shape[1]), dtype=ans_token.dtype).cuda()
for i in range(len(ans_len)):
ans_mean[i] = ans_token[cur_len:cur_len + ans_len[i]].mean(dim=0)
cur_len += ans_len[i]
ans_tensor.append(ans_mean)
ans_tensor = torch.stack(ans_tensor, dim=1)
first_token_tensor = first_token_tensor.unsqueeze(1).expand(sequence_output.shape[0], 4, sequence_output.shape[-1])
first_ans_token = torch.cat((first_token_tensor, ans_tensor), dim=-1)
avg_pooled_output = self.dense_avg(first_ans_token)
avg_pooled_output = self.activation(avg_pooled_output)
return avg_pooled_output
class UniterForVisualCommonsenseReasoning_inf(UniterPreTrainedModel):
""" Finetune UNITER for VCR
"""
def __init__(self, config, img_dim):
super().__init__(config, img_dim)
self.uniter = UniterModel(config, img_dim)
self.vcr_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
nn.ReLU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, 2)
)
self.apply(self.init_weights)
self.criterion = torch.nn.CrossEntropyLoss(reduction='mean')
def init_type_embedding(self):
new_emb = nn.Embedding(4, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :]
new_emb.weight.data[2, :].copy_(emb)
new_emb.weight.data[3, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def init_word_embedding(self, num_special_tokens):
orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0)
new_emb = nn.Embedding(
orig_word_num + num_special_tokens, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
emb = self.uniter.embeddings.word_embeddings.weight.data
new_emb.weight.data[:orig_word_num, :].copy_(emb)
self.uniter.embeddings.word_embeddings = new_emb
def forward(self, batch,task):
batch = defaultdict(lambda: None, batch)
qa_token = batch['qa_token']
qa_num=len(batch)//5
qar_num=len(batch)-qa_num
assert qa_num*4==qar_num
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
txt_type_ids = batch['txt_type_ids']
if task=='qa':
qa_mask = qa_token == 1
input_ids=torch.masked_select(input_ids,qa_mask).reshape(qa_num,-1)
position_ids = torch.masked_select(position_ids, qa_mask).reshape(qa_num, -1)
img_feat = torch.masked_select(img_feat, qa_mask).reshape(qa_num, -1)
img_pos_feat = torch.masked_select(img_pos_feat, qa_mask).reshape(qa_num, -1)
attn_masks = torch.masked_select(attn_masks, qa_mask).reshape(qa_num, -1)
gather_index = torch.masked_select(gather_index, qa_mask).reshape(qa_num, -1)
txt_type_ids = torch.masked_select(txt_type_ids, qa_mask).reshape(qa_num, -1)
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
pooled_output = self.uniter.pooler(sequence_output)
rank_scores = self.vcr_output(pooled_output)
else:
qar_mask = qa_token == 0
input_ids = torch.masked_select(input_ids, qar_mask).reshape(qar_num, -1)
position_ids = torch.masked_select(position_ids, qar_mask).reshape(qar_num, -1)
img_feat = torch.masked_select(img_feat, qar_mask).reshape(qar_num, -1)
img_pos_feat = torch.masked_select(img_pos_feat, qar_mask).reshape(qar_num, -1)
attn_masks = torch.masked_select(attn_masks, qar_mask).reshape(qar_num, -1)
gather_index = torch.masked_select(gather_index, qar_mask).reshape(qar_num, -1)
txt_type_ids = torch.masked_select(txt_type_ids, qar_mask).reshape(qar_num, -1)
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
pooled_output = self.uniter.pooler(sequence_output)
rank_scores = self.vcr_output(pooled_output)
return rank_scores
class Uniter_Four_two(UniterPreTrainedModel):
""" Finetune UNITER for VCR
"""
def __init__(self, config, img_dim):
super().__init__(config, img_dim)
self.uniter = UniterModel(config, img_dim)
self.vcr_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
nn.ReLU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, 2)
)
self.apply(self.init_weights)
self.criterion = torch.nn.CrossEntropyLoss(reduction='mean')
self.dense = nn.Linear(config.hidden_size*2, config.hidden_size)
self.activation = nn.Tanh()
def init_type_embedding(self):
new_emb = nn.Embedding(4, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :]
new_emb.weight.data[2, :].copy_(emb)
new_emb.weight.data[3, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def init_word_embedding(self, num_special_tokens):
orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0)
new_emb = nn.Embedding(
orig_word_num + num_special_tokens, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
emb = self.uniter.embeddings.word_embeddings.weight.data
new_emb.weight.data[:orig_word_num, :].copy_(emb)
self.uniter.embeddings.word_embeddings = new_emb
def forward(self, batch, compute_loss=False):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
txt_type_ids = batch['txt_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
ans_index=batch['ans_index']
pooled_output = self.pooler(sequence_output,ans_index)
rank_scores = self.vcr_output(pooled_output)
targets = batch['a_targets']
loss =F.cross_entropy(
rank_scores.reshape(rank_scores.shape[0]*rank_scores.shape[1],-1), targets.view(-1),
reduction='mean')
rank_scores_soft=F.softmax(rank_scores,dim=-1)
# rank_scores_one=rank_scores_soft[:,:,1:]
# out=rank_scores.view(rank_scores.shape[0]//4,-1)
# label=batch['a_label']
# output = rank_scores_one.max(dim=1)[1].squeeze()
rank_scores_out=rank_scores_soft.max(dim=2)[1]
matched_qa_tmp = rank_scores_out == targets
matched_qa=matched_qa_tmp.all(dim=1)
return rank_scores,loss,matched_qa
def pooler(self, hidden_states,ans_index):
pad=torch.zeros((ans_index.shape[0],hidden_states.shape[1]-ans_index.shape[1]),dtype=torch.int64).cuda()
ans_index=torch.cat((ans_index,pad),dim=1)
ans_index=ans_index.unsqueeze(-1).expand((ans_index.shape[0],ans_index.shape[1],hidden_states.shape[-1]))
mask=ans_index>0
first_token_tensor = hidden_states[:, 0]
ans_token=torch.masked_select(hidden_states,mask)
ans_token=ans_token.view((hidden_states.shape[0],5,hidden_states.shape[-1]))
ans_token=ans_token[:,1:,:]
#取每个问题后面的SEP对应操作
first_token_tensor=first_token_tensor.unsqueeze(1).expand(hidden_states.shape[0],4,hidden_states.shape[-1])
first_ans_token=torch.cat((first_token_tensor,ans_token),dim=-1)
pooled_output = self.dense(first_ans_token)
pooled_output = self.activation(pooled_output)
return pooled_output
class Uniter_Four(UniterPreTrainedModel):
""" Finetune UNITER for VCR
"""
def __init__(self, config, img_dim):
super().__init__(config, img_dim)
self.uniter = UniterModel(config, img_dim)
self.vcr_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
nn.ReLU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, 4)
)
self.apply(self.init_weights)
self.criterion = torch.nn.CrossEntropyLoss(reduction='mean')
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def init_type_embedding(self):
new_emb = nn.Embedding(4, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :]
new_emb.weight.data[2, :].copy_(emb)
new_emb.weight.data[3, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def init_word_embedding(self, num_special_tokens):
orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0)
new_emb = nn.Embedding(
orig_word_num + num_special_tokens, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
emb = self.uniter.embeddings.word_embeddings.weight.data
new_emb.weight.data[:orig_word_num, :].copy_(emb)
self.uniter.embeddings.word_embeddings = new_emb
def forward(self, batch, compute_loss=False):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
txt_type_ids = batch['txt_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
ans_index=batch['ans_index']
pooled_output = self.pooler(sequence_output)
rank_scores = self.vcr_output(pooled_output)
targets = batch['a_label']
loss =F.cross_entropy(rank_scores, targets,reduction='mean')
output=rank_scores.max(dim=1)[1]
matched_qa = output == targets
return rank_scores,loss,matched_qa
def pooler(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class Uniter_Four_match(UniterPreTrainedModel):
""" Finetune UNITER for VCR
"""
def __init__(self, config, img_dim):
super().__init__(config, img_dim)
self.uniter = UniterModel(config, img_dim)
self.vcr_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
nn.ReLU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, 4)
)
self.vcr_output_match = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
nn.ReLU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, 4)
)
self.apply(self.init_weights)
self.criterion = torch.nn.CrossEntropyLoss(reduction='mean')
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dense_avg = nn.Linear(config.hidden_size*2, config.hidden_size)
self.activation = nn.Tanh()
def init_type_embedding(self):
new_emb = nn.Embedding(4, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :]
new_emb.weight.data[2, :].copy_(emb)
new_emb.weight.data[3, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def init_word_embedding(self, num_special_tokens):
orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0)
new_emb = nn.Embedding(
orig_word_num + num_special_tokens, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
emb = self.uniter.embeddings.word_embeddings.weight.data
new_emb.weight.data[:orig_word_num, :].copy_(emb)
self.uniter.embeddings.word_embeddings = new_emb
def forward(self, batch, compute_loss=False):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
txt_type_ids = batch['txt_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
ans_mask=batch['ans_mask']
pooled_output,avg_pooled_output = self.pooler(sequence_output,ans_mask)
rank_scores = self.vcr_output(pooled_output)
match_scores=self.vcr_output_match(avg_pooled_output)
labels = batch['a_label']
loss =F.cross_entropy(rank_scores, labels,reduction='mean')
match_iter=batch['match_iter']
match_loss=F.cross_entropy(match_scores.reshape(match_scores.shape[0]*match_scores.shape[1],-1),match_iter.view(-1),reduction='mean')
output=rank_scores.max(dim=1)[1]
matched_qa = output == labels
return rank_scores,loss,match_loss,matched_qa
def pooler(self, hidden_states,ans_mask):
bs=hidden_states.shape[0]
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
pad = torch.zeros((bs, hidden_states.shape[1] - ans_mask.shape[1]), dtype=torch.int64).cuda()
ans_mask = torch.cat((ans_mask, pad), dim=1)
ans_mask = ans_mask.unsqueeze(-1).expand((ans_mask.shape[0], ans_mask.shape[1], hidden_states.shape[-1]))
ans_tensor = []
for i in range(1, 5):
mask = ans_mask == i
ans_token = torch.masked_select(hidden_states, mask)
ans_token = ans_token.view(-1, hidden_states.shape[-1])
ans_len = mask[:, :, 0].sum(dim=1)
cur_len=0
ans_mean=torch.zeros((bs,ans_token.shape[1]),dtype=ans_token.dtype).cuda()
for i in range(len(ans_len)):
ans_mean[i]=ans_token[cur_len:cur_len+ans_len[i]].mean(dim=0)
cur_len+=ans_len[i]
ans_tensor.append(ans_mean)
ans_tensor=torch.stack(ans_tensor,dim=1)
first_token_tensor = first_token_tensor.unsqueeze(1).expand(hidden_states.shape[0], 4, hidden_states.shape[-1])
first_ans_token = torch.cat((first_token_tensor, ans_tensor), dim=-1)
avg_pooled_output = self.dense_avg(first_ans_token)
avg_pooled_output = self.activation(avg_pooled_output)
return pooled_output,avg_pooled_output
| 48.258621
| 144
| 0.639236
| 3,315
| 25,191
| 4.540875
| 0.053695
| 0.043181
| 0.05846
| 0.045838
| 0.88102
| 0.862619
| 0.842423
| 0.814123
| 0.814123
| 0.780907
| 0
| 0.013388
| 0.249811
| 25,191
| 522
| 145
| 48.258621
| 0.783152
| 0.021357
| 0
| 0.758621
| 0
| 0
| 0.02451
| 0
| 0
| 0
| 0
| 0
| 0.002155
| 1
| 0.060345
| false
| 0
| 0.019397
| 0
| 0.114224
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cbb91ffe155fb105f927d7e38e7048500ad60a7b
| 55,183
|
py
|
Python
|
sdk/python/pulumi_aws/glue/dev_endpoint.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/glue/dev_endpoint.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/glue/dev_endpoint.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DevEndpointArgs', 'DevEndpoint']
@pulumi.input_type
class DevEndpointArgs:
def __init__(__self__, *,
role_arn: pulumi.Input[str],
arguments: Optional[pulumi.Input[Mapping[str, Any]]] = None,
extra_jars_s3_path: Optional[pulumi.Input[str]] = None,
extra_python_libs_s3_path: Optional[pulumi.Input[str]] = None,
glue_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
number_of_nodes: Optional[pulumi.Input[int]] = None,
number_of_workers: Optional[pulumi.Input[int]] = None,
public_key: Optional[pulumi.Input[str]] = None,
public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
security_configuration: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
worker_type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DevEndpoint resource.
:param pulumi.Input[str] role_arn: The IAM role for this endpoint.
:param pulumi.Input[Mapping[str, Any]] arguments: A map of arguments used to configure the endpoint.
:param pulumi.Input[str] extra_jars_s3_path: Path to one or more Java Jars in an S3 bucket that should be loaded in this endpoint.
:param pulumi.Input[str] extra_python_libs_s3_path: Path(s) to one or more Python libraries in an S3 bucket that should be loaded in this endpoint. Multiple values must be complete paths separated by a comma.
:param pulumi.Input[str] glue_version: - Specifies the versions of Python and Apache Spark to use. Defaults to AWS Glue version 0.9.
:param pulumi.Input[str] name: The name of this endpoint. It must be unique in your account.
:param pulumi.Input[int] number_of_nodes: The number of AWS Glue Data Processing Units (DPUs) to allocate to this endpoint. Conflicts with `worker_type`.
:param pulumi.Input[int] number_of_workers: The number of workers of a defined worker type that are allocated to this endpoint. This field is available only when you choose worker type G.1X or G.2X.
:param pulumi.Input[str] public_key: The public key to be used by this endpoint for authentication.
:param pulumi.Input[Sequence[pulumi.Input[str]]] public_keys: A list of public keys to be used by this endpoint for authentication.
:param pulumi.Input[str] security_configuration: The name of the Security Configuration structure to be used with this endpoint.
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: Security group IDs for the security groups to be used by this endpoint.
:param pulumi.Input[str] subnet_id: The subnet ID for the new endpoint to use.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[str] worker_type: The type of predefined worker that is allocated to this endpoint. Accepts a value of Standard, G.1X, or G.2X.
"""
pulumi.set(__self__, "role_arn", role_arn)
if arguments is not None:
pulumi.set(__self__, "arguments", arguments)
if extra_jars_s3_path is not None:
pulumi.set(__self__, "extra_jars_s3_path", extra_jars_s3_path)
if extra_python_libs_s3_path is not None:
pulumi.set(__self__, "extra_python_libs_s3_path", extra_python_libs_s3_path)
if glue_version is not None:
pulumi.set(__self__, "glue_version", glue_version)
if name is not None:
pulumi.set(__self__, "name", name)
if number_of_nodes is not None:
pulumi.set(__self__, "number_of_nodes", number_of_nodes)
if number_of_workers is not None:
pulumi.set(__self__, "number_of_workers", number_of_workers)
if public_key is not None:
pulumi.set(__self__, "public_key", public_key)
if public_keys is not None:
pulumi.set(__self__, "public_keys", public_keys)
if security_configuration is not None:
pulumi.set(__self__, "security_configuration", security_configuration)
if security_group_ids is not None:
pulumi.set(__self__, "security_group_ids", security_group_ids)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if worker_type is not None:
pulumi.set(__self__, "worker_type", worker_type)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Input[str]:
"""
The IAM role for this endpoint.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def arguments(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
A map of arguments used to configure the endpoint.
"""
return pulumi.get(self, "arguments")
@arguments.setter
def arguments(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "arguments", value)
@property
@pulumi.getter(name="extraJarsS3Path")
def extra_jars_s3_path(self) -> Optional[pulumi.Input[str]]:
"""
Path to one or more Java Jars in an S3 bucket that should be loaded in this endpoint.
"""
return pulumi.get(self, "extra_jars_s3_path")
@extra_jars_s3_path.setter
def extra_jars_s3_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extra_jars_s3_path", value)
@property
@pulumi.getter(name="extraPythonLibsS3Path")
def extra_python_libs_s3_path(self) -> Optional[pulumi.Input[str]]:
"""
Path(s) to one or more Python libraries in an S3 bucket that should be loaded in this endpoint. Multiple values must be complete paths separated by a comma.
"""
return pulumi.get(self, "extra_python_libs_s3_path")
@extra_python_libs_s3_path.setter
def extra_python_libs_s3_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extra_python_libs_s3_path", value)
@property
@pulumi.getter(name="glueVersion")
def glue_version(self) -> Optional[pulumi.Input[str]]:
"""
- Specifies the versions of Python and Apache Spark to use. Defaults to AWS Glue version 0.9.
"""
return pulumi.get(self, "glue_version")
@glue_version.setter
def glue_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "glue_version", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this endpoint. It must be unique in your account.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="numberOfNodes")
def number_of_nodes(self) -> Optional[pulumi.Input[int]]:
"""
The number of AWS Glue Data Processing Units (DPUs) to allocate to this endpoint. Conflicts with `worker_type`.
"""
return pulumi.get(self, "number_of_nodes")
@number_of_nodes.setter
def number_of_nodes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "number_of_nodes", value)
@property
@pulumi.getter(name="numberOfWorkers")
def number_of_workers(self) -> Optional[pulumi.Input[int]]:
"""
The number of workers of a defined worker type that are allocated to this endpoint. This field is available only when you choose worker type G.1X or G.2X.
"""
return pulumi.get(self, "number_of_workers")
@number_of_workers.setter
def number_of_workers(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "number_of_workers", value)
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> Optional[pulumi.Input[str]]:
"""
The public key to be used by this endpoint for authentication.
"""
return pulumi.get(self, "public_key")
@public_key.setter
def public_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_key", value)
@property
@pulumi.getter(name="publicKeys")
def public_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of public keys to be used by this endpoint for authentication.
"""
return pulumi.get(self, "public_keys")
@public_keys.setter
def public_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "public_keys", value)
@property
@pulumi.getter(name="securityConfiguration")
def security_configuration(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Security Configuration structure to be used with this endpoint.
"""
return pulumi.get(self, "security_configuration")
@security_configuration.setter
def security_configuration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_configuration", value)
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Security group IDs for the security groups to be used by this endpoint.
"""
return pulumi.get(self, "security_group_ids")
@security_group_ids.setter
def security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "security_group_ids", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
The subnet ID for the new endpoint to use.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of resource tags. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter(name="workerType")
def worker_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of predefined worker that is allocated to this endpoint. Accepts a value of Standard, G.1X, or G.2X.
"""
return pulumi.get(self, "worker_type")
@worker_type.setter
def worker_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "worker_type", value)
@pulumi.input_type
class _DevEndpointState:
def __init__(__self__, *,
arguments: Optional[pulumi.Input[Mapping[str, Any]]] = None,
arn: Optional[pulumi.Input[str]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
extra_jars_s3_path: Optional[pulumi.Input[str]] = None,
extra_python_libs_s3_path: Optional[pulumi.Input[str]] = None,
failure_reason: Optional[pulumi.Input[str]] = None,
glue_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
number_of_nodes: Optional[pulumi.Input[int]] = None,
number_of_workers: Optional[pulumi.Input[int]] = None,
private_address: Optional[pulumi.Input[str]] = None,
public_address: Optional[pulumi.Input[str]] = None,
public_key: Optional[pulumi.Input[str]] = None,
public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
security_configuration: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
status: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
worker_type: Optional[pulumi.Input[str]] = None,
yarn_endpoint_address: Optional[pulumi.Input[str]] = None,
zeppelin_remote_spark_interpreter_port: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering DevEndpoint resources.
:param pulumi.Input[Mapping[str, Any]] arguments: A map of arguments used to configure the endpoint.
:param pulumi.Input[str] arn: The ARN of the endpoint.
:param pulumi.Input[str] availability_zone: The AWS availability zone where this endpoint is located.
:param pulumi.Input[str] extra_jars_s3_path: Path to one or more Java Jars in an S3 bucket that should be loaded in this endpoint.
:param pulumi.Input[str] extra_python_libs_s3_path: Path(s) to one or more Python libraries in an S3 bucket that should be loaded in this endpoint. Multiple values must be complete paths separated by a comma.
:param pulumi.Input[str] failure_reason: The reason for a current failure in this endpoint.
:param pulumi.Input[str] glue_version: - Specifies the versions of Python and Apache Spark to use. Defaults to AWS Glue version 0.9.
:param pulumi.Input[str] name: The name of this endpoint. It must be unique in your account.
:param pulumi.Input[int] number_of_nodes: The number of AWS Glue Data Processing Units (DPUs) to allocate to this endpoint. Conflicts with `worker_type`.
:param pulumi.Input[int] number_of_workers: The number of workers of a defined worker type that are allocated to this endpoint. This field is available only when you choose worker type G.1X or G.2X.
:param pulumi.Input[str] private_address: A private IP address to access the endpoint within a VPC, if this endpoint is created within one.
:param pulumi.Input[str] public_address: The public IP address used by this endpoint. The PublicAddress field is present only when you create a non-VPC endpoint.
:param pulumi.Input[str] public_key: The public key to be used by this endpoint for authentication.
:param pulumi.Input[Sequence[pulumi.Input[str]]] public_keys: A list of public keys to be used by this endpoint for authentication.
:param pulumi.Input[str] role_arn: The IAM role for this endpoint.
:param pulumi.Input[str] security_configuration: The name of the Security Configuration structure to be used with this endpoint.
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: Security group IDs for the security groups to be used by this endpoint.
:param pulumi.Input[str] status: The current status of this endpoint.
:param pulumi.Input[str] subnet_id: The subnet ID for the new endpoint to use.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[str] vpc_id: he ID of the VPC used by this endpoint.
:param pulumi.Input[str] worker_type: The type of predefined worker that is allocated to this endpoint. Accepts a value of Standard, G.1X, or G.2X.
:param pulumi.Input[str] yarn_endpoint_address: The YARN endpoint address used by this endpoint.
:param pulumi.Input[int] zeppelin_remote_spark_interpreter_port: The Apache Zeppelin port for the remote Apache Spark interpreter.
"""
if arguments is not None:
pulumi.set(__self__, "arguments", arguments)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if availability_zone is not None:
pulumi.set(__self__, "availability_zone", availability_zone)
if extra_jars_s3_path is not None:
pulumi.set(__self__, "extra_jars_s3_path", extra_jars_s3_path)
if extra_python_libs_s3_path is not None:
pulumi.set(__self__, "extra_python_libs_s3_path", extra_python_libs_s3_path)
if failure_reason is not None:
pulumi.set(__self__, "failure_reason", failure_reason)
if glue_version is not None:
pulumi.set(__self__, "glue_version", glue_version)
if name is not None:
pulumi.set(__self__, "name", name)
if number_of_nodes is not None:
pulumi.set(__self__, "number_of_nodes", number_of_nodes)
if number_of_workers is not None:
pulumi.set(__self__, "number_of_workers", number_of_workers)
if private_address is not None:
pulumi.set(__self__, "private_address", private_address)
if public_address is not None:
pulumi.set(__self__, "public_address", public_address)
if public_key is not None:
pulumi.set(__self__, "public_key", public_key)
if public_keys is not None:
pulumi.set(__self__, "public_keys", public_keys)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if security_configuration is not None:
pulumi.set(__self__, "security_configuration", security_configuration)
if security_group_ids is not None:
pulumi.set(__self__, "security_group_ids", security_group_ids)
if status is not None:
pulumi.set(__self__, "status", status)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if vpc_id is not None:
pulumi.set(__self__, "vpc_id", vpc_id)
if worker_type is not None:
pulumi.set(__self__, "worker_type", worker_type)
if yarn_endpoint_address is not None:
pulumi.set(__self__, "yarn_endpoint_address", yarn_endpoint_address)
if zeppelin_remote_spark_interpreter_port is not None:
pulumi.set(__self__, "zeppelin_remote_spark_interpreter_port", zeppelin_remote_spark_interpreter_port)
@property
@pulumi.getter
def arguments(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
A map of arguments used to configure the endpoint.
"""
return pulumi.get(self, "arguments")
@arguments.setter
def arguments(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "arguments", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The ARN of the endpoint.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[pulumi.Input[str]]:
"""
The AWS availability zone where this endpoint is located.
"""
return pulumi.get(self, "availability_zone")
@availability_zone.setter
def availability_zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "availability_zone", value)
@property
@pulumi.getter(name="extraJarsS3Path")
def extra_jars_s3_path(self) -> Optional[pulumi.Input[str]]:
"""
Path to one or more Java Jars in an S3 bucket that should be loaded in this endpoint.
"""
return pulumi.get(self, "extra_jars_s3_path")
@extra_jars_s3_path.setter
def extra_jars_s3_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extra_jars_s3_path", value)
@property
@pulumi.getter(name="extraPythonLibsS3Path")
def extra_python_libs_s3_path(self) -> Optional[pulumi.Input[str]]:
"""
Path(s) to one or more Python libraries in an S3 bucket that should be loaded in this endpoint. Multiple values must be complete paths separated by a comma.
"""
return pulumi.get(self, "extra_python_libs_s3_path")
@extra_python_libs_s3_path.setter
def extra_python_libs_s3_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extra_python_libs_s3_path", value)
@property
@pulumi.getter(name="failureReason")
def failure_reason(self) -> Optional[pulumi.Input[str]]:
"""
The reason for a current failure in this endpoint.
"""
return pulumi.get(self, "failure_reason")
@failure_reason.setter
def failure_reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "failure_reason", value)
@property
@pulumi.getter(name="glueVersion")
def glue_version(self) -> Optional[pulumi.Input[str]]:
"""
- Specifies the versions of Python and Apache Spark to use. Defaults to AWS Glue version 0.9.
"""
return pulumi.get(self, "glue_version")
@glue_version.setter
def glue_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "glue_version", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this endpoint. It must be unique in your account.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="numberOfNodes")
def number_of_nodes(self) -> Optional[pulumi.Input[int]]:
"""
The number of AWS Glue Data Processing Units (DPUs) to allocate to this endpoint. Conflicts with `worker_type`.
"""
return pulumi.get(self, "number_of_nodes")
@number_of_nodes.setter
def number_of_nodes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "number_of_nodes", value)
@property
@pulumi.getter(name="numberOfWorkers")
def number_of_workers(self) -> Optional[pulumi.Input[int]]:
"""
The number of workers of a defined worker type that are allocated to this endpoint. This field is available only when you choose worker type G.1X or G.2X.
"""
return pulumi.get(self, "number_of_workers")
@number_of_workers.setter
def number_of_workers(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "number_of_workers", value)
@property
@pulumi.getter(name="privateAddress")
def private_address(self) -> Optional[pulumi.Input[str]]:
"""
A private IP address to access the endpoint within a VPC, if this endpoint is created within one.
"""
return pulumi.get(self, "private_address")
@private_address.setter
def private_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_address", value)
@property
@pulumi.getter(name="publicAddress")
def public_address(self) -> Optional[pulumi.Input[str]]:
"""
The public IP address used by this endpoint. The PublicAddress field is present only when you create a non-VPC endpoint.
"""
return pulumi.get(self, "public_address")
@public_address.setter
def public_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_address", value)
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> Optional[pulumi.Input[str]]:
"""
The public key to be used by this endpoint for authentication.
"""
return pulumi.get(self, "public_key")
@public_key.setter
def public_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_key", value)
@property
@pulumi.getter(name="publicKeys")
def public_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of public keys to be used by this endpoint for authentication.
"""
return pulumi.get(self, "public_keys")
@public_keys.setter
def public_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "public_keys", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
The IAM role for this endpoint.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter(name="securityConfiguration")
def security_configuration(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Security Configuration structure to be used with this endpoint.
"""
return pulumi.get(self, "security_configuration")
@security_configuration.setter
def security_configuration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_configuration", value)
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Security group IDs for the security groups to be used by this endpoint.
"""
return pulumi.get(self, "security_group_ids")
@security_group_ids.setter
def security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "security_group_ids", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The current status of this endpoint.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
The subnet ID for the new endpoint to use.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of resource tags. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> Optional[pulumi.Input[str]]:
"""
he ID of the VPC used by this endpoint.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter(name="workerType")
def worker_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of predefined worker that is allocated to this endpoint. Accepts a value of Standard, G.1X, or G.2X.
"""
return pulumi.get(self, "worker_type")
@worker_type.setter
def worker_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "worker_type", value)
@property
@pulumi.getter(name="yarnEndpointAddress")
def yarn_endpoint_address(self) -> Optional[pulumi.Input[str]]:
"""
The YARN endpoint address used by this endpoint.
"""
return pulumi.get(self, "yarn_endpoint_address")
@yarn_endpoint_address.setter
def yarn_endpoint_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "yarn_endpoint_address", value)
@property
@pulumi.getter(name="zeppelinRemoteSparkInterpreterPort")
def zeppelin_remote_spark_interpreter_port(self) -> Optional[pulumi.Input[int]]:
"""
The Apache Zeppelin port for the remote Apache Spark interpreter.
"""
return pulumi.get(self, "zeppelin_remote_spark_interpreter_port")
@zeppelin_remote_spark_interpreter_port.setter
def zeppelin_remote_spark_interpreter_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "zeppelin_remote_spark_interpreter_port", value)
class DevEndpoint(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
arguments: Optional[pulumi.Input[Mapping[str, Any]]] = None,
extra_jars_s3_path: Optional[pulumi.Input[str]] = None,
extra_python_libs_s3_path: Optional[pulumi.Input[str]] = None,
glue_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
number_of_nodes: Optional[pulumi.Input[int]] = None,
number_of_workers: Optional[pulumi.Input[int]] = None,
public_key: Optional[pulumi.Input[str]] = None,
public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
security_configuration: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
worker_type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Glue Development Endpoint resource.
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_aws as aws
example_policy_document = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=["sts:AssumeRole"],
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="Service",
identifiers=["glue.amazonaws.com"],
)],
)])
example_role = aws.iam.Role("exampleRole", assume_role_policy=example_policy_document.json)
example_dev_endpoint = aws.glue.DevEndpoint("exampleDevEndpoint", role_arn=example_role.arn)
example__aws_glue_service_role = aws.iam.RolePolicyAttachment("example-AWSGlueServiceRole",
policy_arn="arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole",
role=example_role.name)
```
## Import
A Glue Development Endpoint can be imported using the `name`, e.g.
```sh
$ pulumi import aws:glue/devEndpoint:DevEndpoint example foo
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, Any]] arguments: A map of arguments used to configure the endpoint.
:param pulumi.Input[str] extra_jars_s3_path: Path to one or more Java Jars in an S3 bucket that should be loaded in this endpoint.
:param pulumi.Input[str] extra_python_libs_s3_path: Path(s) to one or more Python libraries in an S3 bucket that should be loaded in this endpoint. Multiple values must be complete paths separated by a comma.
:param pulumi.Input[str] glue_version: - Specifies the versions of Python and Apache Spark to use. Defaults to AWS Glue version 0.9.
:param pulumi.Input[str] name: The name of this endpoint. It must be unique in your account.
:param pulumi.Input[int] number_of_nodes: The number of AWS Glue Data Processing Units (DPUs) to allocate to this endpoint. Conflicts with `worker_type`.
:param pulumi.Input[int] number_of_workers: The number of workers of a defined worker type that are allocated to this endpoint. This field is available only when you choose worker type G.1X or G.2X.
:param pulumi.Input[str] public_key: The public key to be used by this endpoint for authentication.
:param pulumi.Input[Sequence[pulumi.Input[str]]] public_keys: A list of public keys to be used by this endpoint for authentication.
:param pulumi.Input[str] role_arn: The IAM role for this endpoint.
:param pulumi.Input[str] security_configuration: The name of the Security Configuration structure to be used with this endpoint.
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: Security group IDs for the security groups to be used by this endpoint.
:param pulumi.Input[str] subnet_id: The subnet ID for the new endpoint to use.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[str] worker_type: The type of predefined worker that is allocated to this endpoint. Accepts a value of Standard, G.1X, or G.2X.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DevEndpointArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Glue Development Endpoint resource.
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_aws as aws
example_policy_document = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=["sts:AssumeRole"],
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="Service",
identifiers=["glue.amazonaws.com"],
)],
)])
example_role = aws.iam.Role("exampleRole", assume_role_policy=example_policy_document.json)
example_dev_endpoint = aws.glue.DevEndpoint("exampleDevEndpoint", role_arn=example_role.arn)
example__aws_glue_service_role = aws.iam.RolePolicyAttachment("example-AWSGlueServiceRole",
policy_arn="arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole",
role=example_role.name)
```
## Import
A Glue Development Endpoint can be imported using the `name`, e.g.
```sh
$ pulumi import aws:glue/devEndpoint:DevEndpoint example foo
```
:param str resource_name: The name of the resource.
:param DevEndpointArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DevEndpointArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
arguments: Optional[pulumi.Input[Mapping[str, Any]]] = None,
extra_jars_s3_path: Optional[pulumi.Input[str]] = None,
extra_python_libs_s3_path: Optional[pulumi.Input[str]] = None,
glue_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
number_of_nodes: Optional[pulumi.Input[int]] = None,
number_of_workers: Optional[pulumi.Input[int]] = None,
public_key: Optional[pulumi.Input[str]] = None,
public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
security_configuration: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
worker_type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DevEndpointArgs.__new__(DevEndpointArgs)
__props__.__dict__["arguments"] = arguments
__props__.__dict__["extra_jars_s3_path"] = extra_jars_s3_path
__props__.__dict__["extra_python_libs_s3_path"] = extra_python_libs_s3_path
__props__.__dict__["glue_version"] = glue_version
__props__.__dict__["name"] = name
__props__.__dict__["number_of_nodes"] = number_of_nodes
__props__.__dict__["number_of_workers"] = number_of_workers
__props__.__dict__["public_key"] = public_key
__props__.__dict__["public_keys"] = public_keys
if role_arn is None and not opts.urn:
raise TypeError("Missing required property 'role_arn'")
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["security_configuration"] = security_configuration
__props__.__dict__["security_group_ids"] = security_group_ids
__props__.__dict__["subnet_id"] = subnet_id
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["worker_type"] = worker_type
__props__.__dict__["arn"] = None
__props__.__dict__["availability_zone"] = None
__props__.__dict__["failure_reason"] = None
__props__.__dict__["private_address"] = None
__props__.__dict__["public_address"] = None
__props__.__dict__["status"] = None
__props__.__dict__["vpc_id"] = None
__props__.__dict__["yarn_endpoint_address"] = None
__props__.__dict__["zeppelin_remote_spark_interpreter_port"] = None
super(DevEndpoint, __self__).__init__(
'aws:glue/devEndpoint:DevEndpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arguments: Optional[pulumi.Input[Mapping[str, Any]]] = None,
arn: Optional[pulumi.Input[str]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
extra_jars_s3_path: Optional[pulumi.Input[str]] = None,
extra_python_libs_s3_path: Optional[pulumi.Input[str]] = None,
failure_reason: Optional[pulumi.Input[str]] = None,
glue_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
number_of_nodes: Optional[pulumi.Input[int]] = None,
number_of_workers: Optional[pulumi.Input[int]] = None,
private_address: Optional[pulumi.Input[str]] = None,
public_address: Optional[pulumi.Input[str]] = None,
public_key: Optional[pulumi.Input[str]] = None,
public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
security_configuration: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
status: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
worker_type: Optional[pulumi.Input[str]] = None,
yarn_endpoint_address: Optional[pulumi.Input[str]] = None,
zeppelin_remote_spark_interpreter_port: Optional[pulumi.Input[int]] = None) -> 'DevEndpoint':
"""
Get an existing DevEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, Any]] arguments: A map of arguments used to configure the endpoint.
:param pulumi.Input[str] arn: The ARN of the endpoint.
:param pulumi.Input[str] availability_zone: The AWS availability zone where this endpoint is located.
:param pulumi.Input[str] extra_jars_s3_path: Path to one or more Java Jars in an S3 bucket that should be loaded in this endpoint.
:param pulumi.Input[str] extra_python_libs_s3_path: Path(s) to one or more Python libraries in an S3 bucket that should be loaded in this endpoint. Multiple values must be complete paths separated by a comma.
:param pulumi.Input[str] failure_reason: The reason for a current failure in this endpoint.
:param pulumi.Input[str] glue_version: - Specifies the versions of Python and Apache Spark to use. Defaults to AWS Glue version 0.9.
:param pulumi.Input[str] name: The name of this endpoint. It must be unique in your account.
:param pulumi.Input[int] number_of_nodes: The number of AWS Glue Data Processing Units (DPUs) to allocate to this endpoint. Conflicts with `worker_type`.
:param pulumi.Input[int] number_of_workers: The number of workers of a defined worker type that are allocated to this endpoint. This field is available only when you choose worker type G.1X or G.2X.
:param pulumi.Input[str] private_address: A private IP address to access the endpoint within a VPC, if this endpoint is created within one.
:param pulumi.Input[str] public_address: The public IP address used by this endpoint. The PublicAddress field is present only when you create a non-VPC endpoint.
:param pulumi.Input[str] public_key: The public key to be used by this endpoint for authentication.
:param pulumi.Input[Sequence[pulumi.Input[str]]] public_keys: A list of public keys to be used by this endpoint for authentication.
:param pulumi.Input[str] role_arn: The IAM role for this endpoint.
:param pulumi.Input[str] security_configuration: The name of the Security Configuration structure to be used with this endpoint.
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: Security group IDs for the security groups to be used by this endpoint.
:param pulumi.Input[str] status: The current status of this endpoint.
:param pulumi.Input[str] subnet_id: The subnet ID for the new endpoint to use.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[str] vpc_id: he ID of the VPC used by this endpoint.
:param pulumi.Input[str] worker_type: The type of predefined worker that is allocated to this endpoint. Accepts a value of Standard, G.1X, or G.2X.
:param pulumi.Input[str] yarn_endpoint_address: The YARN endpoint address used by this endpoint.
:param pulumi.Input[int] zeppelin_remote_spark_interpreter_port: The Apache Zeppelin port for the remote Apache Spark interpreter.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DevEndpointState.__new__(_DevEndpointState)
__props__.__dict__["arguments"] = arguments
__props__.__dict__["arn"] = arn
__props__.__dict__["availability_zone"] = availability_zone
__props__.__dict__["extra_jars_s3_path"] = extra_jars_s3_path
__props__.__dict__["extra_python_libs_s3_path"] = extra_python_libs_s3_path
__props__.__dict__["failure_reason"] = failure_reason
__props__.__dict__["glue_version"] = glue_version
__props__.__dict__["name"] = name
__props__.__dict__["number_of_nodes"] = number_of_nodes
__props__.__dict__["number_of_workers"] = number_of_workers
__props__.__dict__["private_address"] = private_address
__props__.__dict__["public_address"] = public_address
__props__.__dict__["public_key"] = public_key
__props__.__dict__["public_keys"] = public_keys
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["security_configuration"] = security_configuration
__props__.__dict__["security_group_ids"] = security_group_ids
__props__.__dict__["status"] = status
__props__.__dict__["subnet_id"] = subnet_id
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["vpc_id"] = vpc_id
__props__.__dict__["worker_type"] = worker_type
__props__.__dict__["yarn_endpoint_address"] = yarn_endpoint_address
__props__.__dict__["zeppelin_remote_spark_interpreter_port"] = zeppelin_remote_spark_interpreter_port
return DevEndpoint(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arguments(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
A map of arguments used to configure the endpoint.
"""
return pulumi.get(self, "arguments")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN of the endpoint.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> pulumi.Output[str]:
"""
The AWS availability zone where this endpoint is located.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="extraJarsS3Path")
def extra_jars_s3_path(self) -> pulumi.Output[Optional[str]]:
"""
Path to one or more Java Jars in an S3 bucket that should be loaded in this endpoint.
"""
return pulumi.get(self, "extra_jars_s3_path")
@property
@pulumi.getter(name="extraPythonLibsS3Path")
def extra_python_libs_s3_path(self) -> pulumi.Output[Optional[str]]:
"""
Path(s) to one or more Python libraries in an S3 bucket that should be loaded in this endpoint. Multiple values must be complete paths separated by a comma.
"""
return pulumi.get(self, "extra_python_libs_s3_path")
@property
@pulumi.getter(name="failureReason")
def failure_reason(self) -> pulumi.Output[str]:
"""
The reason for a current failure in this endpoint.
"""
return pulumi.get(self, "failure_reason")
@property
@pulumi.getter(name="glueVersion")
def glue_version(self) -> pulumi.Output[Optional[str]]:
"""
- Specifies the versions of Python and Apache Spark to use. Defaults to AWS Glue version 0.9.
"""
return pulumi.get(self, "glue_version")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of this endpoint. It must be unique in your account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="numberOfNodes")
def number_of_nodes(self) -> pulumi.Output[Optional[int]]:
"""
The number of AWS Glue Data Processing Units (DPUs) to allocate to this endpoint. Conflicts with `worker_type`.
"""
return pulumi.get(self, "number_of_nodes")
@property
@pulumi.getter(name="numberOfWorkers")
def number_of_workers(self) -> pulumi.Output[Optional[int]]:
"""
The number of workers of a defined worker type that are allocated to this endpoint. This field is available only when you choose worker type G.1X or G.2X.
"""
return pulumi.get(self, "number_of_workers")
@property
@pulumi.getter(name="privateAddress")
def private_address(self) -> pulumi.Output[str]:
"""
A private IP address to access the endpoint within a VPC, if this endpoint is created within one.
"""
return pulumi.get(self, "private_address")
@property
@pulumi.getter(name="publicAddress")
def public_address(self) -> pulumi.Output[str]:
"""
The public IP address used by this endpoint. The PublicAddress field is present only when you create a non-VPC endpoint.
"""
return pulumi.get(self, "public_address")
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> pulumi.Output[Optional[str]]:
"""
The public key to be used by this endpoint for authentication.
"""
return pulumi.get(self, "public_key")
@property
@pulumi.getter(name="publicKeys")
def public_keys(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of public keys to be used by this endpoint for authentication.
"""
return pulumi.get(self, "public_keys")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Output[str]:
"""
The IAM role for this endpoint.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="securityConfiguration")
def security_configuration(self) -> pulumi.Output[Optional[str]]:
"""
The name of the Security Configuration structure to be used with this endpoint.
"""
return pulumi.get(self, "security_configuration")
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Security group IDs for the security groups to be used by this endpoint.
"""
return pulumi.get(self, "security_group_ids")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The current status of this endpoint.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Output[Optional[str]]:
"""
The subnet ID for the new endpoint to use.
"""
return pulumi.get(self, "subnet_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of resource tags. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
return pulumi.get(self, "tags_all")
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Output[str]:
"""
he ID of the VPC used by this endpoint.
"""
return pulumi.get(self, "vpc_id")
@property
@pulumi.getter(name="workerType")
def worker_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of predefined worker that is allocated to this endpoint. Accepts a value of Standard, G.1X, or G.2X.
"""
return pulumi.get(self, "worker_type")
@property
@pulumi.getter(name="yarnEndpointAddress")
def yarn_endpoint_address(self) -> pulumi.Output[str]:
"""
The YARN endpoint address used by this endpoint.
"""
return pulumi.get(self, "yarn_endpoint_address")
@property
@pulumi.getter(name="zeppelinRemoteSparkInterpreterPort")
def zeppelin_remote_spark_interpreter_port(self) -> pulumi.Output[int]:
"""
The Apache Zeppelin port for the remote Apache Spark interpreter.
"""
return pulumi.get(self, "zeppelin_remote_spark_interpreter_port")
| 47.860364
| 249
| 0.663683
| 7,058
| 55,183
| 4.962312
| 0.039813
| 0.097362
| 0.085941
| 0.069095
| 0.936101
| 0.921197
| 0.899897
| 0.886335
| 0.877341
| 0.847419
| 0
| 0.002958
| 0.234329
| 55,183
| 1,152
| 250
| 47.90191
| 0.825973
| 0.328942
| 0
| 0.782222
| 1
| 0
| 0.104422
| 0.029158
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168889
| false
| 0.001481
| 0.007407
| 0.004444
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
cbde54f59b773c4146be57e07e6e764738200430
| 13,752
|
py
|
Python
|
src/omuse/community/qgmodel/test_bc.py
|
merijn/omuse
|
696936c211b64c3d5c10674782f9f5ba01cdcfe3
|
[
"Apache-2.0"
] | 12
|
2020-03-25T10:02:00.000Z
|
2021-11-18T00:28:35.000Z
|
src/omuse/community/qgmodel/test_bc.py
|
merijn/omuse
|
696936c211b64c3d5c10674782f9f5ba01cdcfe3
|
[
"Apache-2.0"
] | 45
|
2020-03-03T16:07:16.000Z
|
2022-03-14T09:01:07.000Z
|
src/omuse/community/qgmodel/test_bc.py
|
merijn/omuse
|
696936c211b64c3d5c10674782f9f5ba01cdcfe3
|
[
"Apache-2.0"
] | 8
|
2020-03-03T13:28:50.000Z
|
2021-05-26T09:20:02.000Z
|
import numpy
from amuse.units import units
from amuse.datamodel import Grid
from interface import QGmodel,QGmodelWithRefinements,jans_wind_model
from matplotlib import pyplot
#import logging
#logging.basicConfig(level=logging.DEBUG)
#logging.getLogger("code").setLevel(logging.DEBUG)
def reference(tend=1. | units.hour,dt=3600. | units.s):
q=QGmodel(redirection="none")
q.parameters.dt=dt
q.evolve_model(tend)
psi=q.grid[:,:,0].psi.number
return psi
def interface_bc(tend=1. | units.hour,dt=3600. | units.s,correct=True):
q=QGmodel(redirection="none")
q.parameters.dt=dt
q.parameters.xbound1="interface"
while q.model_time<tend:
q.evolve_model(q.model_time+dt)
if correct:
xb1=q.boundaries(1).copy()
xb1.psi[0,1:-1,0]=-q.grid.psi[1,:,0]
channel=xb1.new_channel_to(q.boundaries(1))
channel.copy()
psi=q.grid[:,:,0].psi.number
return psi
def test1():
tend=192. | units.hour
psi1=reference(tend)
psi2=interface_bc(tend)
d=abs(psi2-psi1)
print(d.max(),d.mean(),abs(psi1).max())
pyplot.ion()
f=pyplot.figure(figsize=(12,4))
pyplot.show()
f.clf()
f1=pyplot.subplot(131)
f1.imshow(psi1.transpose()/psi1.max(),vmin=0,vmax=1,origin="lower")
f2=pyplot.subplot(132)
f2.imshow(psi2.transpose()/psi1.max(),vmin=0,vmax=1,origin="lower")
f3=pyplot.subplot(133)
f3.imshow(abs(psi2-psi1).transpose(),vmin=0,origin="lower")
pyplot.draw()
raw_input()
def semi_domain_test(tend=1. | units.hour,dt=3600. | units.s):
q1=QGmodel(redirection="none")
q1.parameters.dt=dt/2
Lx=q1.parameters.Lx.value_in(1000*units.km)
q2=QGmodel(redirection="none")
q2.parameters.dt=dt/2
q2.parameters.Lx/=2
q2.parameters.boundary_east="interface"
Nx,Ny,Nm=q1.grid.shape
pyplot.ion()
f=pyplot.figure(figsize=(12,5))
pyplot.show()
i=0
while q1.model_time<tend:
i=i+1
tnow=q1.model_time
q1.evolve_model(tnow+dt/2)
psi=q1.grid[(Nx-1)/2:(Nx-1)/2+2,:,0].psi
dpsi_dt=q1.grid[(Nx-1)/2:(Nx-1)/2+2,:,0].dpsi_dt
west=q2.boundaries("west").copy()
west[:,1:-1,0].psi=psi
west[:,1:-1,0].dpsi_dt=dpsi_dt
channel=west.new_channel_to(q2.boundaries("west"))
channel.copy()
q1.evolve_model(tnow+dt)
q2.evolve_model(tnow+dt)
# print(q1.grid[(Nx+1)/2,1,0].dpsi_dt)
# print(q2.boundaries("west")[0:2,0,0].x)
if i%5==0:
psi1_complete=q1.grid.psi.number[:,:,0]
psi1=q1.grid[:(Nx+1)/2,:,0].psi.number
psi2=q2.grid[:,:,0].psi.number
d=abs(psi2-psi1)
print(d.max(),d.mean(),abs(psi1).max())
f.clf()
f1=pyplot.subplot(131)
f1.imshow(psi1_complete.transpose()/psi1.max(),vmin=0,vmax=1,extent=[0,Lx,0,Lx],origin="lower")
f1.set_xlabel("x (x1000 km)")
f2=pyplot.subplot(132)
f2.imshow(psi2.transpose()/psi1.max(),vmin=0,vmax=1,extent=[0,Lx/2,0,Lx],origin="lower")
f2.set_xlabel("x (x1000 km)")
f3=pyplot.subplot(133)
f3.imshow(d.transpose()/psi1.max(),vmin=0,vmax=0.001,extent=[0,Lx/2,0,Lx],origin="lower")
f3.set_xlabel("x (x1000 km)")
pyplot.draw()
pyplot.savefig("snapshots/half_domain_test-%6.6i.png"%i)
raw_input()
def semi_domain_test_interpolation(tend=1. | units.hour,dt=3600. | units.s):
q1=QGmodel(redirection="none")
q1.parameters.dt=dt/2
Lx=q1.parameters.Lx.value_in(1000*units.km)
dx=q1.parameters.dx
q2=QGmodel(redirection="none")
q2.parameters.dt=dt/2
q2.parameters.Lx/=2
q2.parameters.boundary_east="interface"
Nx,Ny,Nm=q1.grid.shape
pyplot.ion()
f=pyplot.figure(figsize=(12,5))
pyplot.show()
i=0
while q1.model_time<tend:
i=i+1
tnow=q1.model_time
q1.evolve_model(tnow+dt/2)
west=q2.boundaries("west").copy()
x=west[:,1:-1,0].x.flatten()
y=west[:,1:-1,0].y.flatten()
psi,dpsi_dt=q1.get_psi_state_at_point(0.*x+dx,x,y)
west[:,1:-1,0].psi=psi.reshape(west[:,1:-1,0].shape)
west[:,1:-1,0].dpsi_dt=dpsi_dt.reshape(west[:,1:-1,0].shape)
channel=west.new_channel_to(q2.boundaries("west"))
channel.copy()
q1.evolve_model(tnow+dt)
q2.evolve_model(tnow+dt)
# print(q1.grid[(Nx+1)/2,1,0].dpsi_dt)
# print(q2.boundaries("west")[0:2,0,0].x)
if i%5==0:
psi1_complete=q1.grid.psi.number[:,:,0]
psi1=q1.grid[:(Nx+1)/2,:,0].psi.number
psi2=q2.grid[:,:,0].psi.number
d=abs(psi2-psi1)
print(d.max(),d.mean(),abs(psi1).max())
f.clf()
f1=pyplot.subplot(131)
f1.imshow(psi1_complete.transpose()/psi1.max(),vmin=0,vmax=1,extent=[0,Lx,0,Lx],origin="lower")
f1.set_xlabel("x (x1000 km)")
f2=pyplot.subplot(132)
f2.imshow(psi2.transpose()/psi1.max(),vmin=0,vmax=1,extent=[0,Lx/2,0,Lx],origin="lower")
f2.set_xlabel("x (x1000 km)")
f3=pyplot.subplot(133)
f3.imshow(d.transpose()/psi1.max(),vmin=0,vmax=0.001,extent=[0,Lx/2,0,Lx],origin="lower")
f3.set_xlabel("x (x1000 km)")
pyplot.draw()
pyplot.savefig("snapshots/half_domain_test-%6.6i.png"%i)
raw_input()
def semi_domain_test_multires(tend=1. | units.hour,dt=3600. | units.s):
q1=QGmodel(redirection="none")
q1.parameters.dt=dt/2
Lx=q1.parameters.Lx.value_in(1000*units.km)
dx=q1.parameters.dx*8
q1.parameters.dx=dx
q1.parameters.dy=dx
q2=QGmodel(redirection="none")
q2.parameters.dt=dt/2
q2.parameters.Lx/=2
q2.parameters.boundary_east="interface"
Nx,Ny,Nm=q1.grid.shape
pyplot.ion()
f=pyplot.figure(figsize=(12,5))
pyplot.show()
i=0
while q1.model_time<tend:
i=i+1
tnow=q1.model_time
q1.evolve_model(tnow+dt/2)
west=q2.boundaries("west").copy()
x=west[:,1:-1,0].x.flatten()
y=west[:,1:-1,0].y.flatten()
psi,dpsi_dt=q1.get_psi_state_at_point(0.*x+dx,x,y)
west[:,1:-1,0].psi=psi.reshape(west[:,1:-1,0].shape)
west[:,1:-1,0].dpsi_dt=dpsi_dt.reshape(west[:,1:-1,0].shape)
channel=west.new_channel_to(q2.boundaries("west"))
channel.copy()
q1.evolve_model(tnow+dt)
q2.evolve_model(tnow+dt)
# print(q1.grid[(Nx+1)/2,1,0].dpsi_dt)
# print(q2.boundaries("west")[0:2,0,0].x)
if i%5==0:
psi1_complete=q1.grid.psi.number[:,:,0]
psi1=q1.grid[:(Nx+1)/2,:,0].psi.number
psi2=q2.grid[:,:,0].psi.number
# d=abs(psi2-psi1)
# print(d.max(),d.mean(),abs(psi1).max())
f.clf()
f1=pyplot.subplot(131)
f1.imshow(psi1_complete.transpose()/psi1.max(),vmin=0,vmax=1,extent=[0,Lx,0,Lx],origin="lower")
f1.set_xlabel("x (x1000 km)")
f2=pyplot.subplot(132)
f2.imshow(psi2.transpose()/psi1.max(),vmin=0,vmax=1,extent=[0,Lx/2,0,Lx],origin="lower")
f2.set_xlabel("x (x1000 km)")
# f3=pyplot.subplot(133)
# f3.imshow(d.transpose()/psi1.max(),vmin=0,vmax=0.001,extent=[0,Lx/2,0,Lx],origin="lower")
# f3.set_xlabel("x (x1000 km)")
pyplot.draw()
pyplot.savefig("snapshots/half_domain_test-%6.6i.png"%i)
raw_input()
def test_evolve_w_plot(sysfac,tend=1. | units.hour,dt=3600. | units.s,dtplot=None):
sys=sysfac()
if dtplot is None:
dtplot=dt
pyplot.ion()
f=pyplot.figure(figsize=(10,10))
pyplot.show()
i=0
Lx=sys.parameters.Lx
grid=Grid.create((400,400), (Lx,Lx))
dx,dy=grid.cellsize()
Lx=Lx.value_in(1000*units.km)
x=grid.x.flatten()
y=grid.y.flatten()
while sys.model_time<tend-dtplot/2:
i=i+1
sys.evolve_model(sys.model_time+dtplot,dt=dt)
psi,dpsi=sys.get_psi_dpsidt(dx+0.*x,x,y)
psi=psi.reshape(grid.shape)
# psi=sys.grid[:,:,0].psi
f.clf()
f1=pyplot.subplot(111)
f1.imshow(psi.transpose()/psi.max(),vmin=0,vmax=1,extent=[0,Lx,0,Lx],origin="lower")
f1.set_xlabel("x (x1000 km)")
pyplot.draw()
pyplot.savefig("test_bc.png")
if i%100==25:
print("wait")
raw_input()
print("done")
raw_input()
def no_refinement(dt=3600. | units.s): # east refers to direction of the boundary
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt
# q1.parameters.dx*=8
# q1.parameters.dy*=8
return q1
def refinement_east(dt=3600. | units.s): # east refers to direction of the boundary
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt/2
q1.parameters.dx*=8
q1.parameters.dy*=8
Lx=q1.parameters.Lx
dx=q1.parameters.dx
q1.add_refinement(parameters=dict(dt=dt/2,Lx=Lx/2,dx=dx/8,dy=dx/8))
return q1
def refinement_west(dt=3600. | units.s): # east refers to direction of the boundary
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt/2
q1.parameters.dx*=8
q1.parameters.dy*=8
Lx=q1.parameters.Lx
dx=q1.parameters.dx
q1.add_refinement(parameters=dict(dt=dt/2,Lx=Lx/2,dx=dx/8,dy=dx/8),
position=[Lx/2,0.*Lx])
return q1
def refinement_north(dt=3600. | units.s):
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt/2
q1.parameters.dx*=8
q1.parameters.dy*=8
q1.parameters.interface_wind=True
Ly=q1.parameters.Ly
dx=q1.parameters.dx
tau=q1.parameters.tau
q2=q1.add_refinement(parameters=dict(dt=dt/2,Ly=Ly/2,dx=dx/8,dy=dx/8))
def wind_function(x,y):
return jans_wind_model(x,y,Ly,tau)
q1.set_wind(wind_function)
q2.set_wind(wind_function)
return q1
def refinement_south(dt=3600. | units.s):
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt/2
q1.parameters.dx*=8
q1.parameters.dy*=8
q1.parameters.interface_wind=True
Ly=q1.parameters.Ly
dx=q1.parameters.dx
tau=q1.parameters.tau
q2=q1.add_refinement(parameters=dict(dt=dt/2,Ly=Ly/2,dx=dx/8,dy=dx/8),
position=[0| units.m, Ly/2])
def wind_function(x,y):
return jans_wind_model(x,y,Ly,tau)
q1.set_wind(wind_function)
q2.set_wind(wind_function)
return q1
def refinement_south_west(dt=3600. | units.s):
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt/2
q1.parameters.dx*=8
q1.parameters.dy*=8
q1.parameters.interface_wind=True
Ly=q1.parameters.Ly
Lx=q1.parameters.Lx
dx=q1.parameters.dx
tau=q1.parameters.tau
q2=q1.add_refinement(parameters=dict(dt=dt/2,Ly=Ly/2,Lx=Lx/2,dx=dx/8,dy=dx/8),
position=[0| units.m, Ly/2])
def wind_function(x,y):
return jans_wind_model(x,y,Ly,tau)
q1.set_wind(wind_function)
q2.set_wind(wind_function)
return q1
def refinement_north_east_south(dt=3600. | units.s):
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt/2
q1.parameters.dx*=8
q1.parameters.dy*=8
q1.parameters.interface_wind=True
Ly=q1.parameters.Ly
Lx=q1.parameters.Lx
dx=q1.parameters.dx
tau=q1.parameters.tau
q2=q1.add_refinement(parameters=dict(dt=dt/2,Ly=Ly/2,Lx=Lx/2,dx=dx/8,dy=dx/8),
position=[0| units.m, Ly/4])
def wind_function(x,y):
return jans_wind_model(x,y,Ly,tau)
q1.set_wind(wind_function)
q2.set_wind(wind_function)
return q1
def refinement_central(dt=3600. | units.s):
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt/2
q1.parameters.dx*=8
q1.parameters.dy*=8
q1.parameters.interface_wind=True
Ly=q1.parameters.Ly
Lx=q1.parameters.Lx
dx=q1.parameters.dx
tau=q1.parameters.tau
q2=q1.add_refinement(parameters=dict(dt=dt/2,Ly=Ly/2,Lx=Lx/2,dx=dx/8,dy=dx/8),
position=[Lx/4, Ly/4])
def wind_function(x,y):
return jans_wind_model(x,y,Ly,tau)
q1.set_wind(wind_function)
q2.set_wind(wind_function)
return q1
def nested_refinement(dt=3600. | units.s):
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt/2
q1.parameters.dx*=8
q1.parameters.dy*=8
q1.parameters.interface_wind=True
Ly=q1.parameters.Ly
Lx=q1.parameters.Lx
dx=q1.parameters.dx
tau=q1.parameters.tau
q2=q1.add_refinement(parameters=dict(dt=dt/2,Lx=Lx/2,dx=dx/2,dy=dx/2),
position=[0*Lx, 0*Ly])
q3=q2.add_refinement(parameters=dict(dt=dt/4,Ly=Ly/2,Lx=Lx/2,dx=dx/8,dy=dx/8),
position=[0*Lx/4, 0*Ly])
def wind_function(x,y):
return jans_wind_model(x,y,Ly,tau)
q1.set_wind(wind_function)
q2.set_wind(wind_function)
q3.set_wind(wind_function)
return q1
def dual_refinement(dt=3600. | units.s):
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt/2
q1.parameters.dx*=8
q1.parameters.dy*=8
q1.parameters.interface_wind=True
Ly=q1.parameters.Ly
Lx=q1.parameters.Lx
dx=q1.parameters.dx
tau=q1.parameters.tau
q2=q1.add_refinement(parameters=dict(dt=dt/2,Ly=2*Ly/3,Lx=Lx/8,dx=dx/8,dy=dx/8),
position=[Lx/8, Ly/4])
q3=q1.add_refinement(parameters=dict(dt=dt/4,Ly=Ly/8,Lx=2*Lx/3,dx=dx/8,dy=dx/8),
position=[Lx/4, Ly/8])
def wind_function(x,y):
return jans_wind_model(x,y,Ly,tau)
q1.set_wind(wind_function)
q2.set_wind(wind_function)
q3.set_wind(wind_function)
return q1
def refinement_rectangle(dt=3600. | units.s):
q1=QGmodelWithRefinements(redirection="none")
q1.parameters.dt=dt/2
q1.parameters.dx*=8
q1.parameters.dy*=8
q1.parameters.interface_wind=True
Ly=q1.parameters.Ly
Lx=q1.parameters.Lx
dx=q1.parameters.dx
tau=q1.parameters.tau
q2=q1.add_refinement(parameters=dict(dt=dt/2,Ly=Ly/2,Lx=Lx/4,dx=dx/8,dy=dx/8),
position=[Lx/4, Ly/4])
def wind_function(x,y):
return jans_wind_model(x,y,Ly,tau)
q1.set_wind(wind_function)
q2.set_wind(wind_function)
return q1
if __name__=="__main__":
dt=1800 | units.s
def sysfac():
return nested_refinement(dt)
test_evolve_w_plot(sysfac,tend=1000*dt,dt=dt,dtplot=4*dt)
| 25.898305
| 101
| 0.653578
| 2,345
| 13,752
| 3.743284
| 0.073348
| 0.1162
| 0.01481
| 0.038961
| 0.870016
| 0.854751
| 0.835156
| 0.831397
| 0.813511
| 0.778082
| 0
| 0.070526
| 0.165867
| 13,752
| 530
| 102
| 25.94717
| 0.694708
| 0.055047
| 0
| 0.770667
| 0
| 0
| 0.033834
| 0.008324
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072
| false
| 0
| 0.013333
| 0.024
| 0.144
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1dc111d21530d856759a98f13deb3c39057e602d
| 368
|
py
|
Python
|
__init__.py
|
64-B1T/robot_collisions
|
57a30477a0fb80f3d2a522d0c59dae9580ca3155
|
[
"MIT"
] | null | null | null |
__init__.py
|
64-B1T/robot_collisions
|
57a30477a0fb80f3d2a522d0c59dae9580ca3155
|
[
"MIT"
] | null | null | null |
__init__.py
|
64-B1T/robot_collisions
|
57a30477a0fb80f3d2a522d0c59dae9580ca3155
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.append(os.path.dirname(__file__))
from collision_manager import createBox, createCylinder, createSphere, createMesh
from collision_manager import ColliderManager
from collision_manager import ColliderObject
from collision_manager import ColliderArm
from collision_manager import ColliderSP
from collision_manager import ColliderObstacles
| 36.8
| 81
| 0.883152
| 44
| 368
| 7.159091
| 0.431818
| 0.247619
| 0.380952
| 0.495238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 368
| 9
| 82
| 40.888889
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.888889
| 0
| 0.888889
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1de66a0e709c47c983d8e115455a5bd0bff1cb69
| 165
|
py
|
Python
|
wires.py
|
zackmdavis/Wires
|
871634da9949917b4cf9e2f608abe76c72343106
|
[
"MIT"
] | 1
|
2016-08-24T23:51:41.000Z
|
2016-08-24T23:51:41.000Z
|
wires.py
|
zackmdavis/Wires
|
871634da9949917b4cf9e2f608abe76c72343106
|
[
"MIT"
] | null | null | null |
wires.py
|
zackmdavis/Wires
|
871634da9949917b4cf9e2f608abe76c72343106
|
[
"MIT"
] | null | null | null |
from serpentine_record.serpentine_record import *
from wires_core.request_handler import *
from wires_core.server import *
from wires_core.template_engine import *
| 27.5
| 49
| 0.848485
| 23
| 165
| 5.782609
| 0.478261
| 0.225564
| 0.338346
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10303
| 165
| 5
| 50
| 33
| 0.898649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
383231b490e1ccd98fd0705c96fe4ec46a7c6800
| 63,813
|
py
|
Python
|
coop_cms/tests/test_fragments.py
|
BenjCherpas/coop_cms
|
3e50990fdff6ce186509fca7f8b8f3b3134005f1
|
[
"BSD-3-Clause"
] | null | null | null |
coop_cms/tests/test_fragments.py
|
BenjCherpas/coop_cms
|
3e50990fdff6ce186509fca7f8b8f3b3134005f1
|
[
"BSD-3-Clause"
] | null | null | null |
coop_cms/tests/test_fragments.py
|
BenjCherpas/coop_cms
|
3e50990fdff6ce186509fca7f8b8f3b3134005f1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""test fragments feature"""
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
try:
from django.urls import reverse
except:
from django.core.urlresolvers import reverse
from django.template import Template, Context
from model_mommy import mommy
from colorbox.utils import assert_popup_refresh
from coop_cms.forms import ArticleForm
from coop_cms.models import BaseArticle, Fragment, FragmentType, FragmentFilter
from coop_cms.settings import get_article_class
from coop_cms.tests import BaseTestCase, BeautifulSoup
class BaseFragmentTest(BaseTestCase):
"""base class for fragments test"""
def __init__(self, *args, **kwargs):
super(BaseFragmentTest, self).__init__(*args, **kwargs)
self.user = None
def setUp(self):
"""before each test"""
super(BaseFragmentTest, self).setUp()
self._default_article_templates = settings.COOP_CMS_ARTICLE_TEMPLATES
settings.COOP_CMS_ARTICLE_TEMPLATES = (
('test/article_with_fragments.html', 'Article with fragments'),
('test/article_with_fragments_extra_id.html', 'Article with fragments extra id'),
('test/article_with_fragments_template.html', 'Article with fragments template'),
)
def tearDown(self):
"""after each test"""
super(BaseFragmentTest, self).tearDown()
#restore
settings.COOP_CMS_ARTICLE_TEMPLATES = self._default_article_templates
def _log_as_editor(self):
"""_log as editor"""
self.user = user = User.objects.create_user('toto', 'toto@toto.fr', 'toto')
content_type1 = ContentType.objects.get_for_model(get_article_class())
content_type2 = ContentType.objects.get_for_model(Fragment)
for content_type in (content_type1, content_type2):
perm = 'change_{0}'.format(content_type.model)
can_edit = Permission.objects.get(content_type=content_type, codename=perm)
user.user_permissions.add(can_edit)
perm = 'add_{0}'.format(content_type.model)
can_add = Permission.objects.get(content_type=content_type, codename=perm)
user.user_permissions.add(can_add)
user.is_active = True
user.save()
return self.client.login(username='toto', password='toto')
def _log_as_regular_user(self):
"""log a reguar user"""
user = User.objects.create_user('titi', 'titi@toto.fr', 'titi')
#ContentType.objects.get_for_model(get_article_class())
user.is_active = True
user.save()
return self.client.login(username='titi', password='titi')
class FragmentsTest(BaseFragmentTest):
"""Test fragments"""
editable_field_tpl = '<div class="inline-editable" id="html_editor_html_editor__coop_cms__Fragment__id__{0}__content">' + \
'{1}</div>\n<input type="hidden" id="html_editor_html_editor__coop_cms__Fragment__id__{0}__content_hidden" ' + \
'name="html_editor__coop_cms__Fragment__id__{0}__content" value="{1}" />'
def test_fragment_position(self):
"""test position is taken into account"""
fragment_type1 = mommy.make(FragmentType)
fragment_type2 = mommy.make(FragmentType)
fragment1 = mommy.make(Fragment, type=fragment_type1)
fragment2 = mommy.make(Fragment, type=fragment_type1)
fragment3 = mommy.make(Fragment, type=fragment_type1)
fragment4 = mommy.make(Fragment, type=fragment_type1)
fragment_g1 = mommy.make(Fragment, type=fragment_type2)
fragment_g2 = mommy.make(Fragment, type=fragment_type2)
fragment_g3 = mommy.make(Fragment, type=fragment_type2)
fragment5 = mommy.make(Fragment, type=fragment_type1)
for idx, elt in enumerate([fragment1, fragment2, fragment3, fragment4, fragment5]):
self.assertEqual(idx+1, elt.position)
for idx, elt in enumerate([fragment_g1, fragment_g2, fragment_g3]):
self.assertEqual(idx+1, elt.position)
def test_fragment_position_extra_id(self):
"""test position is taken into account when extra id is defined"""
fragment_type1 = mommy.make(FragmentType)
fragment_type2 = mommy.make(FragmentType)
fragment_filter1 = mommy.make(FragmentFilter)
fragment_filter2 = mommy.make(FragmentFilter)
fragments_1 = [
mommy.make(Fragment, type=fragment_type1, filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, filter=fragment_filter2),
mommy.make(Fragment, type=fragment_type1, filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1),
mommy.make(Fragment, type=fragment_type1),
]
fragments_2 = [
mommy.make(Fragment, type=fragment_type2, filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type2, filter=fragment_filter2),
mommy.make(Fragment, type=fragment_type2, filter=fragment_filter2),
]
for idx, elt in enumerate([fragments_1[0], fragments_1[1], fragments_1[2], fragments_1[4]]):
self.assertEqual(idx+1, elt.position)
for idx, elt in enumerate([fragments_1[3]]):
self.assertEqual(idx+1, elt.position)
for idx, elt in enumerate([fragments_2[0]]):
self.assertEqual(idx+1, elt.position)
for idx, elt in enumerate([fragments_2[1], fragments_2[2]]):
self.assertEqual(idx+1, elt.position)
for idx, elt in enumerate([fragments_1[5], fragments_1[6]]):
self.assertEqual(idx+1, elt.position)
def test_fragment_position_update(self):
"""test position can be modified"""
fragment_type1 = mommy.make(FragmentType)
mommy.make(FragmentType)
fragment1 = mommy.make(Fragment, type=fragment_type1)
fragment2 = mommy.make(Fragment, type=fragment_type1)
fragment3 = mommy.make(Fragment, type=fragment_type1)
fragment1.save()
fragment2.save()
fragment3.save()
for idx, elt in enumerate([fragment1, fragment2, fragment3]):
self.assertEqual(idx+1, elt.position)
def test_view_fragments(self):
"""test view fragments"""
ft_name = "contacts"
fragment_type1 = mommy.make(FragmentType, name=ft_name)
fragment1 = mommy.make(Fragment, type=fragment_type1, content="Azerty")
fragment2 = mommy.make(Fragment, type=fragment_type1, content="Qsdfgh")
fragment3 = mommy.make(Fragment, type=fragment_type1, content="Wxcvbn")
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name %}')
html = tpl.render(Context({"ft_name": ft_name}))
positions = [html.find('{0}'.format(f.content)) for f in [fragment1, fragment2, fragment3]]
for pos in positions:
self.assertTrue(pos >= 0)
sorted_positions = positions[:]
sorted_positions.sort()
self.assertEqual(positions, sorted_positions)
def test_view_fragments_extra_id_in_edit_mode(self):
"""in edit mode: coop-fragment-type"""
ft_name = "contacts"
fragment_type1 = mommy.make(FragmentType, name=ft_name)
fragment_filter1 = mommy.make(FragmentFilter, extra_id="1")
fragment_filter2 = mommy.make(FragmentFilter, extra_id="2")
fragments = [
mommy.make(Fragment, type=fragment_type1, content="Azerty", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Qsdfgh", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Wxcvbn", filter=fragment_filter2),
mommy.make(Fragment, type=fragment_type1, content="Zsxdrg", filter=None),
]
article = mommy.make(get_article_class(), title='test')
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name x %}')
html = tpl.render(Context({"ft_name": ft_name, "x": 1, "form": ArticleForm(instance=article)}))
positions = [html.find('{0}'.format(f.content)) for f in [fragments[0], fragments[1]]]
for pos in positions:
self.assertTrue(pos >= 0)
self.assertEqual(positions, sorted(positions))
soup = BeautifulSoup(html)
ft_tags = soup.select(".coop-fragment-type")
self.assertEqual(len(ft_tags), 1)
ft_tag = ft_tags[0]
self.assertEqual(ft_tag['rel'], str(fragment_type1.id))
self.assertEqual(ft_tag['data-filter'], str(fragment_filter1.id))
for frag in [fragments[2], fragments[3]]:
self.assertTrue(html.find(frag.content) < 0)
def test_view_fragments_extra_id_in_view_mode(self):
"""in view mode: no coop-fragment-type"""
ft_name = "contacts"
fragment_type1 = mommy.make(FragmentType, name=ft_name)
fragment_filter1 = mommy.make(FragmentFilter, extra_id="1")
fragment_filter2 = mommy.make(FragmentFilter, extra_id="2")
fragments = [
mommy.make(Fragment, type=fragment_type1, content="Azerty", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Qsdfgh", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Wxcvbn", filter=fragment_filter2),
mommy.make(Fragment, type=fragment_type1, content="Zsxdrg", filter=None),
]
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name x %}')
html = tpl.render(Context({"ft_name": ft_name, "x": 1}))
positions = [html.find('{0}'.format(f.content)) for f in [fragments[0], fragments[1]]]
for pos in positions:
self.assertTrue(pos >= 0)
self.assertEqual(positions, sorted(positions))
soup = BeautifulSoup(html)
ft_tags = soup.select(".coop-fragment-type")
self.assertEqual(len(ft_tags), 0)
for frag in [fragments[2], fragments[3]]:
self.assertTrue(html.find(frag.content) < 0)
def test_fragments_with_extra_id(self):
"""test fragments with extra id"""
ft_name = "contacts"
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name x %}')
tpl.render(Context({"ft_name": ft_name, 'x': 2}))
self.assertEqual(FragmentType.objects.count(), 1)
self.assertEqual(FragmentType.objects.filter(name=ft_name).count(), 1)
self.assertEqual(FragmentFilter.objects.count(), 1)
self.assertEqual(FragmentFilter.objects.filter(extra_id='2').count(), 1)
def test_view_fragments_name_as_string(self):
"""test fragments name hardcode in templatetag"""
fragment_type1 = mommy.make(FragmentType, name="contacts")
fragment1 = mommy.make(Fragment, type=fragment_type1, content="Azerty")
fragment2 = mommy.make(Fragment, type=fragment_type1, content="Qsdfgh")
fragment3 = mommy.make(Fragment, type=fragment_type1, content="Wxcvbn")
tpl = Template('{% load coop_edition %}{% coop_fragments "contacts" %}')
html = tpl.render(Context({"ft_name": "contacts"}))
positions = [html.find('{0}'.format(f.content)) for f in [fragment1, fragment2, fragment3]]
for pos in positions:
self.assertTrue(pos >= 0)
sorted_positions = positions[:]
sorted_positions.sort()
self.assertEqual(positions, sorted_positions)
def test_view_fragments_args_as_string(self):
"""test name and extra id hardcoded"""
fragment_type1 = mommy.make(FragmentType, name="contacts")
fragment_filter1 = mommy.make(FragmentFilter, extra_id="hello")
fragment_filter2 = mommy.make(FragmentFilter, extra_id="2")
fragment1 = mommy.make(Fragment, type=fragment_type1, content="Azerty", filter=fragment_filter1)
fragment2 = mommy.make(Fragment, type=fragment_type1, content="Qsdfgh", filter=fragment_filter1)
fragment3 = mommy.make(Fragment, type=fragment_type1, content="Wxcvbn", filter=fragment_filter2)
fragment4 = mommy.make(Fragment, type=fragment_type1, content="Zsxdrg", filter=None)
tpl = Template('{% load coop_edition %}{% coop_fragments "contacts" "hello" %}')
html = tpl.render(Context({"ft_name": "contacts"}))
positions = [html.find('{0}'.format(f.content)) for f in [fragment1, fragment2]]
for pos in positions:
self.assertTrue(pos >= 0)
sorted_positions = positions[:]
sorted_positions.sort()
self.assertEqual(positions, sorted_positions)
for frag in [fragment3, fragment4]:
self.assertTrue(html.find(frag.content) < 0)
def test_view_fragments_order(self):
"""test fragments displayed in position order"""
ft_name = "contacts"
fragment_type1 = mommy.make(FragmentType, name=ft_name)
fragment1 = mommy.make(Fragment, type=fragment_type1, content="Azerty", position=3)
fragment2 = mommy.make(Fragment, type=fragment_type1, content="Qsdfgh", position=1)
fragment3 = mommy.make(Fragment, type=fragment_type1, content="Wxcvbn", position=2)
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name %}')
html = tpl.render(Context({"ft_name": ft_name}))
positions = [html.find('{0}'.format(f.content)) for f in [fragment2, fragment3, fragment1]]
for pos in positions:
self.assertTrue(pos >= 0)
sorted_positions = positions[:]
sorted_positions.sort()
self.assertEqual(positions, sorted_positions)
def test_view_only_specified_fragments(self):
"""test display only right fragements"""
ft_name = "contacts"
fragment_type1 = mommy.make(FragmentType, name=ft_name)
fragment_type2 = mommy.make(FragmentType, name="AAAA")
fragment1 = mommy.make(Fragment, type=fragment_type1, content="Azerty")
fragment2 = mommy.make(Fragment, type=fragment_type1, content="Qsdfgh")
fragment3 = mommy.make(Fragment, type=fragment_type1, content="Wxcvbn")
fragment_g1 = mommy.make(Fragment, type=fragment_type2, content="POIUYT")
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name %}')
html = tpl.render(Context({"ft_name": ft_name}))
positions = [html.find('{0}'.format(f.content)) for f in [fragment2, fragment3, fragment1]]
for pos in positions:
self.assertTrue(pos >= 0)
positions = [html.find('{0}'.format(f.content)) for f in [fragment_g1]]
for pos in positions:
self.assertTrue(pos == -1)
def test_view_only_specified_extra_id(self):
"""text extra_id is taken into account"""
ft_name = "contacts"
fragment_type1 = mommy.make(FragmentType, name=ft_name)
fragment_type2 = mommy.make(FragmentType, name="AAAA")
fragment_filter1 = mommy.make(FragmentFilter, extra_id="hello")
fragment_filter2 = mommy.make(FragmentFilter, extra_id="2")
fragments = [
mommy.make(Fragment, type=fragment_type1, content="Azerty", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Qsdfgh", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Wxcvbn", filter=fragment_filter2),
mommy.make(Fragment, type=fragment_type1, content="Zsxdrg", filter=None),
mommy.make(Fragment, type=fragment_type2, content="POIUYT", filter=fragment_filter1),
]
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name "hello" %}')
html = tpl.render(Context({"ft_name": ft_name}))
positions = [html.find('{0}'.format(f.content)) for f in [fragments[1], fragments[0]]]
for pos in positions:
self.assertTrue(pos >= 0)
positions = [html.find('{0}'.format(f.content)) for f in [fragments[4], fragments[2], fragments[3]]]
for pos in positions:
self.assertTrue(pos == -1)
def test_view_extra_id_named_args(self):
"""text extra_id is taken into account extra_id is given as named arg"""
ft_name = "contacts"
fragment_type1 = mommy.make(FragmentType, name=ft_name)
fragment_type2 = mommy.make(FragmentType, name="AAAA")
fragment_filter1 = mommy.make(FragmentFilter, extra_id="hello")
fragment_filter2 = mommy.make(FragmentFilter, extra_id="2")
fragments = [
mommy.make(Fragment, type=fragment_type1, content="Azerty", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Qsdfgh", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Wxcvbn", filter=fragment_filter2),
mommy.make(Fragment, type=fragment_type1, content="Zsxdrg", filter=None),
mommy.make(Fragment, type=fragment_type2, content="POIUYT", filter=fragment_filter1),
]
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name extra_id="hello" %}')
html = tpl.render(Context({"ft_name": ft_name}))
positions = [html.find('{0}'.format(f.content)) for f in [fragments[1], fragments[0]]]
for pos in positions:
self.assertTrue(pos >= 0)
positions = [html.find('{0}'.format(f.content)) for f in [fragments[4], fragments[2], fragments[3]]]
for pos in positions:
self.assertTrue(pos == -1)
def test_view_extra_id_named_args_end(self):
"""text extra_id is taken into account extra_id is given as named arg in last position"""
ft_name = "contacts"
fragment_type1 = mommy.make(FragmentType, name=ft_name)
fragment_type2 = mommy.make(FragmentType, name="AAAA")
fragment_filter1 = mommy.make(FragmentFilter, extra_id="hello")
fragment_filter2 = mommy.make(FragmentFilter, extra_id="2")
fragments = [
mommy.make(Fragment, type=fragment_type1, content="Azerty", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Qsdfgh", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Wxcvbn", filter=fragment_filter2),
mommy.make(Fragment, type=fragment_type1, content="Zsxdrg", filter=None),
mommy.make(Fragment, type=fragment_type2, content="POIUYT", filter=fragment_filter1),
]
tpl = Template(
'{% load coop_edition %}{% coop_fragments ft_name template_name="test/_fragment.html" extra_id="hello" %}'
)
html = tpl.render(Context({"ft_name": ft_name}))
positions = [html.find('{0}'.format(f.content)) for f in [fragments[1], fragments[0]]]
for pos in positions:
self.assertTrue(pos >= 0)
positions = [html.find('{0}'.format(f.content)) for f in [fragments[4], fragments[2], fragments[3]]]
for pos in positions:
self.assertTrue(pos == -1)
def test_view_fragments_edit_mode(self):
"""test view in edit mode"""
ft_name = "contacts"
fragment_type1 = mommy.make(FragmentType, name=ft_name)
fragment_type2 = mommy.make(FragmentType, name="AAAA")
fragments = [
mommy.make(Fragment, type=fragment_type1, content="Azerty"),
mommy.make(Fragment, type=fragment_type1, content="Qsdfgh"),
mommy.make(Fragment, type=fragment_type1, content="Wxcvbn"),
mommy.make(Fragment, type=fragment_type2, content="POIUYT"),
]
article = mommy.make(get_article_class(), title='test')
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name %}')
html = tpl.render(Context({"ft_name": ft_name, "form": ArticleForm(instance=article)}))
positions = [
html.find(self.editable_field_tpl.format(f.id, f.content))
for f in [fragments[0], fragments[1], fragments[2]]
]
for pos in positions:
self.assertTrue(pos >= 0)
self.assertEqual(positions, sorted(positions))
positions = [html.find(self.editable_field_tpl.format(f.id, f.content)) for f in [fragments[3]]]
for pos in positions:
self.assertTrue(pos == -1)
def test_view_fragments_extra_id_edit_mode(self):
"""test view with extra id in edit mode"""
ft_name = "contacts"
fragment_type1 = mommy.make(FragmentType, name=ft_name)
fragment_type2 = mommy.make(FragmentType, name="AAAA")
fragment_filter1 = mommy.make(FragmentFilter, extra_id="hello")
fragment_filter2 = mommy.make(FragmentFilter, extra_id="2")
article = mommy.make(get_article_class(), title='test')
fragments = [
mommy.make(Fragment, type=fragment_type1, content="Azerty", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Qsdfgh", filter=fragment_filter1),
mommy.make(Fragment, type=fragment_type1, content="Wxcvbn", filter=fragment_filter2),
mommy.make(Fragment, type=fragment_type1, content="Zsxdrg", filter=None),
mommy.make(Fragment, type=fragment_type2, content="POIUYT")
]
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name "hello" %}')
html = tpl.render(Context({"ft_name": ft_name, "form": ArticleForm(instance=article)}))
positions = [
html.find(self.editable_field_tpl.format(fragment.id, fragment.content))
for fragment in [fragments[0], fragments[1]]
]
for pos in positions:
self.assertTrue(pos >= 0)
self.assertEqual(positions, sorted(positions))
positions = [
html.find(self.editable_field_tpl.format(fragment.id, fragment.content))
for fragment in [fragments[4], fragments[2], fragments[3]]
]
for pos in positions:
self.assertTrue(pos == -1)
def test_fragments_with_template(self):
"""test template_name argument of the template tag"""
ft_name = "contacts"
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name template_name="test/_fragment.html" %}')
html = tpl.render(Context({"ft_name": ft_name}))
self.assertEqual(FragmentType.objects.count(), 1)
self.assertEqual(FragmentType.objects.filter(name=ft_name).count(), 1)
soup = BeautifulSoup(html)
self.assertEqual(0, len(soup.select('.panel')))
def test_view_fragments_with_template(self):
"""test view with template_name arguement"""
ft_name = "contacts"
fragment_type = mommy.make(FragmentType, name=ft_name)
mommy.make(Fragment, type=fragment_type)
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name template_name="test/_fragment.html" %}')
html = tpl.render(Context({"ft_name": ft_name}))
self.assertEqual(FragmentType.objects.count(), 1)
self.assertEqual(FragmentType.objects.filter(name=ft_name).count(), 1)
soup = BeautifulSoup(html)
self.assertEqual(1, len(soup.select('.panel')))
def test_view_fragments_template_edit_mode(self):
"""test with template_name in edit_mode"""
ft_name = "contacts"
fragment_type = mommy.make(FragmentType, name=ft_name)
mommy.make(Fragment, type=fragment_type)
article = mommy.make(get_article_class(), title='test')
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name template_name="test/_fragment.html" %}')
html = tpl.render(Context({"ft_name": ft_name, "form": ArticleForm(instance=article)}))
self.assertEqual(FragmentType.objects.count(), 1)
self.assertEqual(FragmentType.objects.filter(name=ft_name).count(), 1)
soup = BeautifulSoup(html)
self.assertEqual(1, len(soup.select('.panel')))
self.assertEqual(1, len(soup.select('.panel input')))
self.assertEqual(1, len(soup.select('.panel .inline-editable')))
def test_view_fragments_with_template2(self):
"""test with another template"""
ft_name = "contacts"
fragment_type = mommy.make(FragmentType, name=ft_name)
mommy.make(Fragment, type=fragment_type)
mommy.make(Fragment, type=fragment_type)
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name template_name="test/_fragment.html" %}')
html = tpl.render(Context({"ft_name": ft_name}))
self.assertEqual(FragmentType.objects.count(), 1)
self.assertEqual(FragmentType.objects.filter(name=ft_name).count(), 1)
soup = BeautifulSoup(html)
self.assertEqual(2, len(soup.select('.panel')))
def test_view_fragments_with_template3(self):
"""test with another other template"""
ft_name = "contacts"
fragment_type = mommy.make(FragmentType, name=ft_name)
mommy.make(Fragment, type=fragment_type)
mommy.make(Fragment, type=fragment_type)
article = mommy.make(get_article_class(), title='test')
tpl = Template('{% load coop_edition %}{% coop_fragments ft_name template_name="test/_fragment.html" %}')
html = tpl.render(Context({"ft_name": ft_name, "form": ArticleForm(instance=article)}))
self.assertEqual(FragmentType.objects.count(), 1)
self.assertEqual(FragmentType.objects.filter(name=ft_name).count(), 1)
soup = BeautifulSoup(html)
self.assertEqual(3, len(soup.select('.panel'))) # 1 extra panel if_cms_edition and fragment index > 0
class FragmentsInArticleTest(BaseFragmentTest):
"""Articles related tests"""
def _check_article(self, response, data):
"""check page content"""
for value in data.values():
self.assertContains(response, value)
def test_view_article_no_fragments(self):
"""view article with no Fragment"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
article = get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
response = self.client.get(article.get_absolute_url())
self.assertEqual(200, response.status_code)
self.assertEqual(1, FragmentType.objects.count())
self.assertEqual("parts", FragmentType.objects.all()[0].name)
def test_view_article_with_fragments(self):
"""view article with Fragment"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
article = get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type = mommy.make(FragmentType, name="parts")
fragment1 = mommy.make(Fragment, type=fragment_type, content="Azertyuiop")
response = self.client.get(article.get_absolute_url())
self.assertEqual(200, response.status_code)
self.assertContains(response, fragment1.content)
def test_view_article_with_fragments_extra_id(self):
"""view article with Fragment and extra_id"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[1][0]
article = get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type = mommy.make(FragmentType, name="parts")
fragment_filter1 = mommy.make(FragmentFilter, extra_id=str(article.id))
fragment_filter2 = mommy.make(FragmentFilter, extra_id="hello")
fragment1 = mommy.make(Fragment, type=fragment_type, content="Azertyuiop", filter=fragment_filter1)
fragment2 = mommy.make(Fragment, type=fragment_type, content="QSDFGHJKLM", filter=fragment_filter2)
fragment3 = mommy.make(Fragment, type=fragment_type, content="Wxcvbn,;:=", filter=None)
response = self.client.get(article.get_absolute_url())
self.assertEqual(200, response.status_code)
self.assertContains(response, fragment1.content)
self.assertNotContains(response, fragment2.content)
self.assertNotContains(response, fragment3.content)
def test_view_article_with_fragment_with_css(self):
"""view article with Fragment and css"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
article = get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type = mommy.make(FragmentType, name="parts")
fragment1 = mommy.make(Fragment, type=fragment_type, content="Azertyuiop", css_class="this-is-my-fragment")
response = self.client.get(article.get_absolute_url())
self.assertEqual(200, response.status_code)
self.assertContains(response, fragment1.content)
soup = BeautifulSoup(response.content)
fragment = soup.select("div."+fragment1.css_class)[0]
self.assertEqual(fragment1.content, fragment.text)
def test_edit_article_no_fragments(self):
"""edit article with no Fragment"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
article = get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
data = {"title": 'salut', 'content': 'bonjour!'}
self._log_as_editor()
response = self.client.post(article.get_edit_url(), data=data, follow=True)
self.assertEqual(response.status_code, 200)
self._check_article(response, data)
data = {"title": 'bye', 'content': 'au revoir'}
response = self.client.post(article.get_edit_url(), data=data, follow=True)
self.assertEqual(response.status_code, 200)
self._check_article(response, data)
def test_edit_article_with_fragments(self):
"""edit article with Fragment"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
article = get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type = mommy.make(FragmentType, name="parts")
fragment1 = mommy.make(Fragment, type=fragment_type, content="Azertyuiop")
new_fragment1_content = "Qsdfghjklm"
data = {
"title": 'salut',
'content': 'bonjour!',
'html_editor__coop_cms__Fragment__id__{0}__content'.format(fragment1.id): new_fragment1_content,
}
self._log_as_editor()
response = self.client.post(article.get_edit_url(), data=data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, data['title'])
self.assertContains(response, data['content'])
self.assertContains(response, new_fragment1_content)
def test_edit_article_with_fragments_extra_id(self):
"""edit article with Fragment and extra_id"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[1][0]
article = get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type = mommy.make(FragmentType, name="parts")
fragment_filter = mommy.make(FragmentFilter, extra_id=str(article.id))
fragment1 = mommy.make(Fragment, type=fragment_type, content="Azertyuiop", filter=fragment_filter)
new_fragment1_content = "Qsdfghjklm"
data = {
"title": 'salut',
'content': 'bonjour!',
'html_editor__coop_cms__Fragment__id__{0}__content'.format(fragment1.id): new_fragment1_content,
}
self._log_as_editor()
response = self.client.post(article.get_edit_url(), data=data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, data['title'])
self.assertContains(response, data['content'])
self.assertContains(response, new_fragment1_content)
def test_view_add_fragment(self):
"""can view add fragment"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
self._log_as_editor()
url = reverse("coop_cms_add_fragment")
response = self.client.get(url)
self.assertEqual(200, response.status_code)
def test_view_add_fragment_check_filters(self):
"""add fragment check filters"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[1][0]
article = get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
self._log_as_editor()
url = article.get_edit_url()
response = self.client.get(url)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
ft_tags = soup.select(".coop-fragment-type")
ft_objs = FragmentType.objects.all()
ff_objs = FragmentFilter.objects.all()
self.assertEqual(len(ft_tags), 2)
self.assertEqual(ft_objs.count(), 2)
self.assertEqual(ff_objs.count(), 1)
for i in range(2):
self.assertEqual(int(ft_tags[i]["rel"]), ft_objs[i].id)
self.assertEqual(ft_tags[0]["data-filter"], '')
self.assertEqual(ft_tags[1]["data-filter"], str(ff_objs[0].id))
def test_view_add_fragment_no_filter_check(self):
"""view add fragment"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
article = get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
self._log_as_editor()
url = article.get_edit_url()
response = self.client.get(url)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
ft_tags = soup.select(".coop-fragment-type")
ft_objs = FragmentType.objects.all()
ff_objs = FragmentFilter.objects.all()
self.assertEqual(len(ft_tags), 1)
self.assertEqual(ft_objs.count(), 1)
self.assertEqual(ff_objs.count(), 0)
self.assertEqual(int(ft_tags[0]["rel"]), ft_objs[0].id)
self.assertEqual(ft_tags[0]["data-filter"], '')
def test_view_add_fragment_permission_denied(self):
"""view add fragment not allowed"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
url = reverse("coop_cms_add_fragment")
response = self.client.get(url)
self.assertEqual(302, response.status_code)
self._log_as_regular_user()
response = self.client.get(url)
self.assertEqual(403, response.status_code)
def _add_fragment(self, data, errors_count=0):
"""helper"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
self._log_as_editor()
url = reverse("coop_cms_add_fragment")
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
errs = soup.select("ul.errorlist li")
if errors_count:
self.assertEqual(errors_count, len(errs))
else:
self.assertEqual([], errs)
assert_popup_refresh(response)
return response
def test_add_fragment(self):
"""add fragment"""
fragment_type = mommy.make(FragmentType, name="parts")
data = {
'type': fragment_type.id,
'name': 'abcd',
'position': 0,
'filter': '',
}
self._add_fragment(data)
fragment = Fragment.objects.all()[0]
self.assertEqual(fragment.type, fragment_type)
self.assertEqual(fragment.name, data['name'])
self.assertEqual(fragment.css_class, '')
self.assertEqual(fragment.position, 1)
self.assertEqual(fragment.filter, None)
def test_add_fragment_filter(self):
"""add fragment with filter"""
fragment_type = mommy.make(FragmentType, name="parts")
fragment_filter = mommy.make(FragmentFilter, extra_id="2")
data = {
'type': fragment_type.id,
'name': 'abcd',
'position': 0,
'filter': fragment_filter.id
}
self._add_fragment(data)
fragment = Fragment.objects.all()[0]
self.assertEqual(fragment.type, fragment_type)
self.assertEqual(fragment.name, data['name'])
self.assertEqual(fragment.css_class, '')
self.assertEqual(fragment.position, 1)
self.assertEqual(fragment.filter, fragment_filter)
def test_add_fragment_position(self):
"""add fragment with position"""
fragment_type = mommy.make(FragmentType, name="parts")
data = {
'type': fragment_type.id,
'name': 'abcd',
'position': 2,
'filter': '',
}
self._add_fragment(data)
fragment = Fragment.objects.all()[0]
self.assertEqual(fragment.type, fragment_type)
self.assertEqual(fragment.name, data['name'])
self.assertEqual(fragment.css_class, '')
self.assertEqual(fragment.position, 2)
def test_add_fragment_invalid_filter(self):
"""add fragment invalid filter"""
fragment_type = mommy.make(FragmentType, name="parts")
data = {
'type': fragment_type.id,
'name': 'abcd',
'position': 2,
'filter': '0',
}
self._add_fragment(data, 1)
self.assertEqual(0, Fragment.objects.count())
def test_add_fragment_one_css(self):
"""add fragment css."""
fragment_type = mommy.make(FragmentType, name="parts", allowed_css_classes="col-1,first-line")
data = {
'type': fragment_type.id,
'name': 'abcd',
'css_class': ['col-1'],
'position': 0,
}
self._add_fragment(data)
fragment = Fragment.objects.all()[0]
self.assertEqual(fragment.type, fragment_type)
self.assertEqual(fragment.name, data['name'])
self.assertEqual(fragment.css_class, 'col-1')
self.assertEqual(fragment.position, 1)
def test_add_fragment_two_css(self):
"""add fragment css"""
fragment_type = mommy.make(FragmentType, name="parts", allowed_css_classes="col-1,first-line")
data = {
'type': fragment_type.id,
'name': 'abcd',
'css_class': ['col-1', 'first-line'],
'position': 0,
}
self._add_fragment(data)
fragment = Fragment.objects.all()[0]
self.assertEqual(fragment.type, fragment_type)
self.assertEqual(fragment.name, data['name'])
self.assertEqual(fragment.css_class, 'col-1 first-line')
self.assertEqual(fragment.position, 1)
def test_add_fragment_invalid_css(self):
"""add fragment css"""
fragment_type = mommy.make(FragmentType, name="parts", allowed_css_classes="col-1")
data = {
'type': fragment_type.id,
'name': 'abcd',
'css_class': ['col-1', 'first-line'],
'position': 0,
}
self._add_fragment(data, errors_count=1)
def test_add_fragment_unknown_css(self):
"""add fragment css"""
fragment_type = mommy.make(FragmentType, name="parts")
data = {
'type': fragment_type.id,
'name': 'abcd',
'css_class': 'okidki',
'position': 0,
}
self._add_fragment(data, errors_count=1)
def test_view_add_fragment_no_perm(self):
"""add fragment not allowed"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type = mommy.make(FragmentType, name="parts")
data = {
'type': fragment_type,
'name': 'abcd',
'css_class': 'okidoki',
'position': 0,
}
url = reverse("coop_cms_add_fragment")
response = self.client.post(url, data=data, follow=False)
self.assertEqual(302, response.status_code)
next_url = "/accounts/login/?next={0}".format(url)
self.assertTrue(response['Location'].find(next_url) >= 0)
self._log_as_regular_user()
response = self.client.post(url, data=data, follow=False)
self.assertEqual(403, response.status_code)
self.assertEqual(0, Fragment.objects.count())
def test_add_fragment_duplicated(self):
"""add fragment"""
fragment_type = mommy.make(FragmentType, name="parts")
fragment = mommy.make(Fragment, name="abcd", type=fragment_type)
data = {
'type': fragment.type.id,
'name': fragment.name,
'position': 0,
#'filter': '',
}
self._add_fragment(data, errors_count=1)
def test_add_fragment_duplicated_filters(self):
"""add fragment"""
fragment_type = mommy.make(FragmentType, name="parts")
fragment_filter1 = mommy.make(FragmentFilter)
fragment = mommy.make(Fragment, name=u"abcd", type=fragment_type, filter=fragment_filter1)
data = {
'type': fragment.type.id,
'name': fragment.name,
'position': 0,
'filter': fragment_filter1.id,
}
self._add_fragment(data, errors_count=1)
def test_add_fragment_duplicated_different_filters(self):
"""add fragment"""
fragment_type = mommy.make(FragmentType, name="parts")
fragment_filter1 = mommy.make(FragmentFilter)
fragment_filter2 = mommy.make(FragmentFilter)
fragment = mommy.make(Fragment, name=u"abcd", type=fragment_type, filter=fragment_filter1)
data = {
'type': fragment.type.id,
'name': fragment.name,
'position': 0,
'filter': fragment_filter2.id,
}
self._add_fragment(data)
self.assertEqual(Fragment.objects.count(), 2)
self.assertEqual(Fragment.objects.exclude(id=fragment.id).count(), 1)
new_fragment = Fragment.objects.exclude(id=fragment.id)[0]
self.assertEqual(new_fragment.type, fragment_type)
self.assertEqual(new_fragment.name, data['name'])
self.assertEqual(new_fragment.css_class, '')
self.assertEqual(new_fragment.filter, fragment_filter2)
def test_view_edit_fragments_empty(self):
"""view edit fragment form: no fragments yet"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
self._log_as_editor()
url = reverse("coop_cms_edit_fragments")
response = self.client.get(url)
self.assertEqual(200, response.status_code)
def test_view_edit_fragments(self):
"""view edit fragment form"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment1 = mommy.make(Fragment, name="azerty")
fragment2 = mommy.make(Fragment, name="qwerty")
self._log_as_editor()
url = reverse("coop_cms_edit_fragments")
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertContains(response, fragment1.name)
self.assertContains(response, fragment2.name)
def test_view_edit_fragments_perms(self):
"""view edit fragment form: not allowed"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
url = reverse("coop_cms_edit_fragments")
response = self.client.get(url)
self.assertEqual(302, response.status_code)
self._log_as_regular_user()
response = self.client.get(url)
self.assertEqual(403, response.status_code)
def test_edit_fragment(self):
"""edit fragment"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type1 = mommy.make(FragmentType)
fragment_type2 = mommy.make(FragmentType)
fragment1 = mommy.make(Fragment, name="azerty", type=fragment_type1)
fragment2 = mommy.make(Fragment, name="qwerty", type=fragment_type2)
data = {
'form-0-id': fragment1.id,
'form-0-type': fragment1.type.id,
'form-0-name': fragment1.name + "!",
'form-0-css_class': "",
'form-0-position': 5,
'form-0-delete_me': False,
'form-1-id': fragment2.id,
'form-1-type': fragment2.type.id,
'form-1-name': fragment2.name + "+",
'form-1-css_class': "",
'form-1-position': 2,
'form-1-delete_me': False,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 2
}
self._log_as_editor()
url = reverse("coop_cms_edit_fragments")
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
errs = soup.select("ul.errorlist li")
self.assertEqual([], errs)
assert_popup_refresh(response)
self.assertEqual(2, Fragment.objects.count())
fragment1 = Fragment.objects.get(id=fragment1.id)
fragment2 = Fragment.objects.get(id=fragment2.id)
self.assertEqual(fragment1.type, fragment_type1)
self.assertEqual(fragment1.name, "azerty!")
self.assertEqual(fragment1.css_class, "")
self.assertEqual(fragment1.position, 5)
self.assertEqual(fragment2.type, fragment_type2)
self.assertEqual(fragment2.name, "qwerty+")
self.assertEqual(fragment2.css_class, "")
self.assertEqual(fragment2.position, 2)
def test_edit_fragment_css_allowed(self):
"""edit fragment: css is allowed"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type1 = mommy.make(FragmentType, allowed_css_classes="oups")
fragment_type2 = mommy.make(FragmentType, allowed_css_classes="aaa,bbb")
fragment1 = mommy.make(Fragment, name="azerty", type=fragment_type1)
fragment2 = mommy.make(Fragment, name="qwerty", type=fragment_type2)
data = {
'form-0-id': fragment1.id,
'form-0-type': fragment1.type.id,
'form-0-name': fragment1.name+"!",
'form-0-css_class': ["oups"],
'form-0-position': 5,
'form-0-delete_me': False,
'form-1-id': fragment2.id,
'form-1-type': fragment2.type.id,
'form-1-name': fragment2.name+"+",
'form-1-css_class': ["aaa", "bbb"],
'form-1-position': 2,
'form-1-delete_me': False,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 2
}
self._log_as_editor()
url = reverse("coop_cms_edit_fragments")
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
errs = soup.select("ul.errorlist li")
self.assertEqual([], errs)
assert_popup_refresh(response)
self.assertEqual(2, Fragment.objects.count())
fragment1 = Fragment.objects.get(id=fragment1.id)
fragment2 = Fragment.objects.get(id=fragment2.id)
self.assertEqual(fragment1.type, fragment_type1)
self.assertEqual(fragment1.name, "azerty!")
self.assertEqual(fragment1.css_class, "oups")
self.assertEqual(fragment1.position, 5)
self.assertEqual(fragment2.type, fragment_type2)
self.assertEqual(fragment2.name, "qwerty+")
self.assertEqual(fragment2.css_class, "aaa bbb")
self.assertEqual(fragment2.position, 2)
def test_edit_fragment_css_not_allowed(self):
"""edit fragment: invalid css"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type1 = mommy.make(FragmentType, allowed_css_classes="")
fragment_type2 = mommy.make(FragmentType)
fragment1 = mommy.make(Fragment, name="azerty", type=fragment_type1)
fragment2 = mommy.make(Fragment, name="qwerty", type=fragment_type2)
data = {
'form-0-id': fragment1.id,
'form-0-type': fragment1.type.id,
'form-0-name': fragment1.name+"!",
'form-0-css_class': "oups",
'form-0-position': 5,
'form-0-delete_me': False,
'form-1-id': fragment2.id,
'form-1-type': fragment2.type.id,
'form-1-name': fragment2.name+"+",
'form-1-css_class': "aaa",
'form-1-position': 2,
'form-1-delete_me': False,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 2
}
self._log_as_editor()
url = reverse("coop_cms_edit_fragments")
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
errs = soup.select("ul.errorlist li")
self.assertEqual(2, len(errs))
self.assertEqual(2, Fragment.objects.count())
fragment1 = Fragment.objects.get(id=fragment1.id)
fragment2 = Fragment.objects.get(id=fragment2.id)
self.assertEqual(fragment1.type, fragment_type1)
self.assertEqual(fragment1.name, "azerty")
self.assertEqual(fragment1.css_class, "")
self.assertEqual(fragment2.type, fragment_type2)
self.assertEqual(fragment2.name, "qwerty")
self.assertEqual(fragment2.css_class, "")
def test_edit_fragment_css_not_allowed2(self):
"""edit fragment: invalid css for only 1"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type1 = mommy.make(FragmentType, allowed_css_classes="aaa")
fragment_type2 = mommy.make(FragmentType)
fragment1 = mommy.make(Fragment, name="azerty", type=fragment_type1)
fragment2 = mommy.make(Fragment, name="qwerty", type=fragment_type2)
data = {
'form-0-id': fragment1.id,
'form-0-type': fragment1.type.id,
'form-0-name': fragment1.name+"!",
'form-0-css_class': "oups",
'form-0-position': 5,
'form-0-delete_me': False,
'form-1-id': fragment2.id,
'form-1-type': fragment2.type.id,
'form-1-name': fragment2.name+"+",
'form-1-css_class': "aaa",
'form-1-position': 2,
'form-1-delete_me': False,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 2
}
self._log_as_editor()
url = reverse("coop_cms_edit_fragments")
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
errs = soup.select("ul.errorlist li")
self.assertEqual(2, len(errs))
self.assertEqual(2, Fragment.objects.count())
fragment1 = Fragment.objects.get(id=fragment1.id)
fragment2 = Fragment.objects.get(id=fragment2.id)
self.assertEqual(fragment1.type, fragment_type1)
self.assertEqual(fragment1.name, "azerty")
self.assertEqual(fragment1.css_class, "")
self.assertEqual(fragment2.type, fragment_type2)
self.assertEqual(fragment2.name, "qwerty")
self.assertEqual(fragment2.css_class, "")
def test_edit_fragment_css_not_allowed3(self):
"""edit fragment: one invalid css """
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type1 = mommy.make(FragmentType, allowed_css_classes="aaa,bbb")
fragment_type2 = mommy.make(FragmentType)
fragment1 = mommy.make(Fragment, name="azerty", type=fragment_type1)
fragment2 = mommy.make(Fragment, name="qwerty", type=fragment_type2)
data = {
'form-0-id': fragment1.id,
'form-0-type': fragment1.type.id,
'form-0-name': fragment1.name+"!",
'form-0-css_class': ["bbb", "oups"],
'form-0-position': 5,
'form-0-delete_me': False,
'form-1-id': fragment2.id,
'form-1-type': fragment2.type.id,
'form-1-name': fragment2.name+"+",
'form-1-css_class': "aaa",
'form-1-position': 2,
'form-1-delete_me': False,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 2
}
self._log_as_editor()
url = reverse("coop_cms_edit_fragments")
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
errs = soup.select("ul.errorlist li")
self.assertEqual(2, len(errs))
self.assertEqual(2, Fragment.objects.count())
fragment1 = Fragment.objects.get(id=fragment1.id)
fragment2 = Fragment.objects.get(id=fragment2.id)
self.assertEqual(fragment1.type, fragment_type1)
self.assertEqual(fragment1.name, "azerty")
self.assertEqual(fragment1.css_class, "")
self.assertEqual(fragment2.type, fragment_type2)
self.assertEqual(fragment2.name, "qwerty")
self.assertEqual(fragment2.css_class, "")
def test_edit_fragment_delete(self):
"""delete fragment"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type1 = mommy.make(FragmentType)
fragment_type2 = mommy.make(FragmentType)
fragment1 = mommy.make(Fragment, name="azerty", type=fragment_type1)
fragment2 = mommy.make(Fragment, name="qwerty", type=fragment_type2)
data = {
'form-0-id': fragment1.id,
'form-0-type': fragment1.type.id,
'form-0-name': fragment1.name,
'form-0-css_class': "",
'form-0-position': 5,
'form-0-delete_me': False,
'form-1-id': fragment2.id,
'form-1-type': fragment2.type.id,
'form-1-name': fragment2.name,
'form-1-css_class': "",
'form-1-position': 2,
'form-1-delete_me': True,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 2
}
self._log_as_editor()
url = reverse("coop_cms_edit_fragments")
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
errs = soup.select("ul.errorlist li")
self.assertEqual([], errs)
assert_popup_refresh(response)
self.assertEqual(1, Fragment.objects.count())
fragment1 = Fragment.objects.get(id=fragment1.id)
self.assertEqual(Fragment.objects.filter(id=fragment2.id).count(), 0)
self.assertEqual(fragment1.type, fragment_type1)
self.assertEqual(fragment1.name, "azerty")
self.assertEqual(fragment1.css_class, "")
self.assertEqual(fragment1.position, 5)
def test_edit_fragment_invalid_position(self):
"""edit fragment: invalid pos"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type1 = mommy.make(FragmentType)
fragment_type2 = mommy.make(FragmentType)
fragment1 = mommy.make(Fragment, name="azerty", type=fragment_type1)
fragment2 = mommy.make(Fragment, name="qwerty", type=fragment_type2)
data = {
'form-0-id': fragment1.id,
'form-0-type': fragment1.type.id,
'form-0-name': fragment1.name+"!",
'form-0-css_class': "",
'form-0-position': "AAA",
'form-0-delete_me': False,
'form-1-id': fragment2.id,
'form-1-type': fragment2.type.id,
'form-1-name': fragment2.name+"+",
'form-1-css_class': "",
'form-1-position': 2,
'form-1-delete_me': False,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 2
}
self._log_as_editor()
url = reverse("coop_cms_edit_fragments")
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
errs = soup.select("ul.errorlist li")
self.assertEqual(1, len(errs))
def test_edit_fragment_empty_name(self):
"""edit fragment empty name"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type1 = mommy.make(FragmentType)
fragment_type2 = mommy.make(FragmentType)
fragment1 = mommy.make(Fragment, name="azerty", type=fragment_type1)
fragment2 = mommy.make(Fragment, name="qwerty", type=fragment_type2)
data = {
'form-0-id': fragment1.id,
'form-0-type': fragment1.type.id,
'form-0-name': "",
'form-0-css_class': "",
'form-0-position': 1,
'form-0-delete_me': False,
'form-1-id': fragment2.id,
'form-1-type': fragment2.type.id,
'form-1-name': fragment2.name+"+",
'form-1-css_class': "",
'form-1-position': 2,
'form-1-delete_me': False,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 2
}
self._log_as_editor()
url = reverse("coop_cms_edit_fragments")
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
errs = soup.select("ul.errorlist li")
self.assertEqual(1, len(errs))
def test_edit_fragment_permission_denied(self):
"""edit fragment: not allowed"""
template = settings.COOP_CMS_ARTICLE_TEMPLATES[0][0]
get_article_class().objects.create(title="test", template=template, publication=BaseArticle.PUBLISHED)
fragment_type1 = mommy.make(FragmentType)
fragment_type2 = mommy.make(FragmentType)
fragment1 = mommy.make(Fragment, name="azerty", type=fragment_type1)
fragment2 = mommy.make(Fragment, name="qwerty", type=fragment_type2)
data = {
'form-0-id': fragment1.id,
'form-0-type': fragment1.type.id,
'form-0-name': fragment1.name+"!",
'form-0-css_class': "",
'form-0-position': 5,
'form-0-delete_me': False,
'form-1-id': fragment2.id,
'form-1-type': fragment2.type.id,
'form-1-name': fragment2.name+"+",
'form-1-css_class': "",
'form-1-position': 2,
'form-1-delete_me': False,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 2
}
url = reverse("coop_cms_edit_fragments")
response = self.client.post(url, data=data, follow=False)
self.assertEqual(302, response.status_code)
self.assertEqual(2, Fragment.objects.count())
fragment1 = Fragment.objects.get(id=fragment1.id)
fragment2 = Fragment.objects.get(id=fragment2.id)
self.assertEqual(fragment1.type, fragment_type1)
self.assertEqual(fragment1.name, "azerty")
self.assertEqual(fragment1.css_class, "")
self.assertEqual(fragment1.position, 1)
self.assertEqual(fragment2.type, fragment_type2)
self.assertEqual(fragment2.name, "qwerty")
self.assertEqual(fragment2.css_class, "")
self.assertEqual(fragment2.position, 1)
self._log_as_regular_user()
response = self.client.post(url, data=data)
self.assertEqual(403, response.status_code)
self.assertEqual(2, Fragment.objects.count())
fragment1 = Fragment.objects.get(id=fragment1.id)
fragment2 = Fragment.objects.get(id=fragment2.id)
self.assertEqual(fragment1.type, fragment_type1)
self.assertEqual(fragment1.name, "azerty")
self.assertEqual(fragment1.css_class, "")
self.assertEqual(fragment1.position, 1)
self.assertEqual(fragment2.type, fragment_type2)
self.assertEqual(fragment2.name, "qwerty")
self.assertEqual(fragment2.css_class, "")
self.assertEqual(fragment2.position, 1)
| 41.196256
| 127
| 0.625641
| 7,219
| 63,813
| 5.340629
| 0.041003
| 0.045987
| 0.04674
| 0.045209
| 0.897131
| 0.869482
| 0.849173
| 0.832521
| 0.810085
| 0.795482
| 0
| 0.020497
| 0.249996
| 63,813
| 1,548
| 128
| 41.222868
| 0.785061
| 0.032172
| 0
| 0.743474
| 0
| 0
| 0.103313
| 0.016703
| 0
| 0
| 0
| 0
| 0.206121
| 1
| 0.056706
| false
| 0.0018
| 0.011701
| 0
| 0.074707
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6985be8ed94994e688837df4b211dd2cd966ef36
| 71,480
|
py
|
Python
|
tools/pythonpkg/tests/fast/substrait/test_ibis_tpch.py
|
lokax/duckdb
|
c2581dfebccaebae9468c924c2c722fcf0306944
|
[
"MIT"
] | 1
|
2022-01-06T17:44:07.000Z
|
2022-01-06T17:44:07.000Z
|
tools/pythonpkg/tests/fast/substrait/test_ibis_tpch.py
|
lokax/duckdb
|
c2581dfebccaebae9468c924c2c722fcf0306944
|
[
"MIT"
] | 2
|
2022-02-16T08:36:03.000Z
|
2022-03-08T17:13:33.000Z
|
tools/pythonpkg/tests/fast/substrait/test_ibis_tpch.py
|
lokax/duckdb
|
c2581dfebccaebae9468c924c2c722fcf0306944
|
[
"MIT"
] | null | null | null |
import duckdb
def get_query_binary(query_number):
if query_number == 1:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x1a\x9d\x07\x12\x9a\x07\x0A\x9b\x06*\x98\x06\x12\xf7\x05"\xf4\x05\x12\xf2\x02\x12\xef\x02\x12\xcc\x02\x0A\xc9\x02\x12\xba\x02\x0A\x0Al_orderkey\x0A\x09l_partkey\x0A\x09l_suppkey\x0A\x0cl_linenumber\x0A\x0Al_quantity\x0A\x0fl_extendedprice\x0A\x0Al_discount\x0A\x05l_tax\x0A\x0cl_returnflag\x0A\x0cl_linestatus\x0A\x0Al_shipdate\x0A\x0cl_commitdate\x0A\x0Dl_receiptdate\x0A\x0el_shipinstruct\x0A\x0Al_shipmode\x0A\x09l_comment\x12l\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08lineitem\x1a\x1e\x1a\x1c\x08\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0A"\x00\x12\x06\x0A\x04\x80\x01\xe7Q\x1a\x04\x0A\x02\x10\x01\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x09"\x00"\x1b\x0A\x19\x08\x02\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x06"\x00 \x03*\x07\xc2\x01\x04\x08\x02\x10\x0f"\x1b\x0A\x19\x08\x02\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00 \x03*\x07\xc2\x01\x04\x08\x02\x10\x0f"\x18\x0A\x16\x08\x03\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00 \x03*\x04Z\x02\x10\x01"\x0c\x0A\x0A\x08\x04 \x03*\x04:\x02\x10\x01"\x1b\x0A\x19\x08\x05\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00 \x03*\x07\xc2\x01\x04\x08\x02\x10&"{\x0Ay\x08\x05\x12j\x1ah\x08\x06\x12:\x1a8\x08\x06\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x12\x1f\x1a\x1d\x08\x07\x12\x04\x0A\x02\x10\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x06"\x00\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x12\x1f\x1a\x1d\x08\x08\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x07"\x00\x12\x04\x0A\x02\x10\x01\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f \x03*\x07\xc2\x01\x04\x08\x02\x10&"K\x0AI\x08\x05\x12:\x1a8\x08\x06\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x12\x1f\x1a\x1d\x08\x07\x12\x04\x0A\x02\x10\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x06"\x00\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f \x03*\x07\xc2\x01\x04\x08\x02\x10&"\x18\x0A\x16\x08\x09\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00 \x03*\x04:\x02\x10\x01\x1a\x0c\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00\x10\x01\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x10\x01\x12\x0cl_returnflag\x12\x0cl_linestatus\x12\x08avg_disc\x12\x09avg_price\x12\x07avg_qty\x12\x0bcount_order\x12\x0esum_base_price\x12\x0Asum_charge\x12\x0esum_disc_price\x12\x07sum_qty'
elif query_number == 3:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x1a\xf0\x08\x12\xed\x08\x0A\xb8\x08\x1a\xb5\x08\x12\xb0\x08*\xad\x08\x12\x8a\x08"\x87\x08\x12\x8d\x07\x12\x8a\x07\x12\x89\x062\x86\x06\x12\x8e\x032\x8b\x03\x12\x9f\x01\x0A\x9c\x01\x12\x8d\x01\x0A\x09c_custkey\x0A\x06c_name\x0A\x09c_address\x0A\x0bc_nationkey\x0A\x07c_phone\x0A\x09c_acctbal\x0A\x0cc_mktsegment\x0A\x09c_comment\x123\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08customer\x1a\xc2\x01\x0A\xbf\x01\x12\xb2\x01\x0A\x0Ao_orderkey\x0A\x09o_custkey\x0A\x0Do_orderstatus\x0A\x0co_totalprice\x0A\x0bo_orderdate\x0A\x0fo_orderpriority\x0A\x07o_clerk\x0A\x0eo_shippriority\x0A\x09o_comment\x12:\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06orders" \x1a\x1e\x08\x0A\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x09"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\xcc\x02\x0A\xc9\x02\x12\xba\x02\x0A\x0Al_orderkey\x0A\x09l_partkey\x0A\x09l_suppkey\x0A\x0cl_linenumber\x0A\x0Al_quantity\x0A\x0fl_extendedprice\x0A\x0Al_discount\x0A\x05l_tax\x0A\x0cl_returnflag\x0A\x0cl_linestatus\x0A\x0Al_shipdate\x0A\x0cl_commitdate\x0A\x0Dl_receiptdate\x0A\x0el_shipinstruct\x0A\x0Al_shipmode\x0A\x09l_comment\x12l\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08lineitem""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x11"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a|\x1az\x08\x0b\x12P\x1aN\x08\x0b\x12$\x1a"\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x06"\x00\x12\x0c\x0A\x0Ab\x08BUILDING\x1a\x04\x0A\x02\x10\x01\x12\x1e\x1a\x1c\x08\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0c"\x00\x12\x06\x0A\x04\x80\x01\xf4G\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1e\x1a\x1c\x08\x10\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x1b"\x00\x12\x06\x0A\x04\x80\x01\xf4G\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x11"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x0c"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x0f"\x00"K\x0AI\x08\x05\x12:\x1a8\x08\x06\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x16"\x00\x12\x1f\x1a\x1d\x08\x07\x12\x04\x0A\x02\x10\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x17"\x00\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f \x03*\x07\xc2\x01\x04\x08\x02\x10&\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x03"\x00\x10\x03\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x10\x01 \x0A\x12\x0Al_orderkey\x12\x0bo_orderdate\x12\x0eo_shippriority\x12\x07revenue'
elif query_number == 4:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x1a\xdc\x03\x12\xd9\x03\x0A\xb8\x03"\xb5\x03\x12\x96\x03*\x93\x03\x12\x80\x03\x12\xfd\x02\x12\xc2\x01\x0A\xbf\x01\x12\xb2\x01\x0A\x0Ao_orderkey\x0A\x09o_custkey\x0A\x0Do_orderstatus\x0A\x0co_totalprice\x0A\x0bo_orderdate\x0A\x0fo_orderpriority\x0A\x07o_clerk\x0A\x0eo_shippriority\x0A\x09o_comment\x12:\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06orders\x1a\xb5\x01\x1a\xb2\x01\x08\x0b\x12\x87\x01\x1a\x84\x01\x08\x0b\x12Z\x1aX\x08\x11\x12N\x1aL\x08\x0b\x12\x1e\x1a\x1c\x08\x0A\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0b"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0c"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1e\x1a\x1c\x08\x12\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x06\x0A\x04\x80\x01\x86C\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1e\x1a\x1c\x08\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x06\x0A\x04\x80\x01\xe2C\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x10\x01\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00"\x0c\x0A\x0A\x08\x04 \x03*\x04:\x02\x10\x01\x12\x0fo_orderpriority\x12\x0border_count'
elif query_number == 5:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x1a\xf3\x0b\x12\xf0\x0b\x0A\xdc\x0b*\xd9\x0b\x12\xc6\x0b"\xc3\x0b\x12\xe5\x0A\x12\xe2\x0A\x12\xe5\x092\xe2\x09\x12\xf5\x082\xf2\x08\x12\xc2\x072\xbf\x07\x12\x89\x062\x86\x06\x12\x8e\x032\x8b\x03\x12\x9f\x01\x0A\x9c\x01\x12\x8d\x01\x0A\x09c_custkey\x0A\x06c_name\x0A\x09c_address\x0A\x0bc_nationkey\x0A\x07c_phone\x0A\x09c_acctbal\x0A\x0cc_mktsegment\x0A\x09c_comment\x123\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08customer\x1a\xc2\x01\x0A\xbf\x01\x12\xb2\x01\x0A\x0Ao_orderkey\x0A\x09o_custkey\x0A\x0Do_orderstatus\x0A\x0co_totalprice\x0A\x0bo_orderdate\x0A\x0fo_orderpriority\x0A\x07o_clerk\x0A\x0eo_shippriority\x0A\x09o_comment\x12:\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06orders" \x1a\x1e\x08\x0A\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x09"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\xcc\x02\x0A\xc9\x02\x12\xba\x02\x0A\x0Al_orderkey\x0A\x09l_partkey\x0A\x09l_suppkey\x0A\x0cl_linenumber\x0A\x0Al_quantity\x0A\x0fl_extendedprice\x0A\x0Al_discount\x0A\x05l_tax\x0A\x0cl_returnflag\x0A\x0cl_linestatus\x0A\x0Al_shipdate\x0A\x0cl_commitdate\x0A\x0Dl_receiptdate\x0A\x0el_shipinstruct\x0A\x0Al_shipmode\x0A\x09l_comment\x12l\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08lineitem""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x11"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\x8a\x01\x0A\x87\x01\x12y\x0A\x09s_suppkey\x0A\x06s_name\x0A\x09s_address\x0A\x0bs_nationkey\x0A\x07s_phone\x0A\x09s_acctbal\x0A\x09s_comment\x12-\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x0A\x0A\x08supplier""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x13"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08!"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1aU\x0AS\x12G\x0A\x0bn_nationkey\x0A\x06n_name\x0A\x0bn_regionkey\x0A\x09n_comment\x12\x18\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06nation"R\x1aP\x08\x0b\x12"\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x03"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08$"\x00\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08$"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08("\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x010\x01\x1aB\x0A@\x124\x0A\x0br_regionkey\x0A\x06r_name\x0A\x09r_comment\x12\x12\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06region""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08*"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08,"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1ax\x1av\x08\x0b\x12L\x1aJ\x08\x0b\x12 \x1a\x1e\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08-"\x00\x12\x08\x0A\x06b\x04ASIA\x1a\x04\x0A\x02\x10\x01\x12\x1e\x1a\x1c\x08\x12\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0c"\x00\x12\x06\x0A\x04\x80\x01\xbeD\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1e\x1a\x1c\x08\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0c"\x00\x12\x06\x0A\x04\x80\x01\xabG\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08)"\x00"K\x0AI\x08\x05\x12:\x1a8\x08\x06\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x16"\x00\x12\x1f\x1a\x1d\x08\x07\x12\x04\x0A\x02\x10\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x17"\x00\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f \x03*\x07\xc2\x01\x04\x08\x02\x10&\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x10\x03\x12\x06n_name\x12\x07revenue'
elif query_number == 6:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x1a\xd7\x04\x12\xd4\x04\x0A\xc8\x04"\xc5\x04\x12\x8a\x04\x12\x87\x04\x12\xcc\x02\x0A\xc9\x02\x12\xba\x02\x0A\x0Al_orderkey\x0A\x09l_partkey\x0A\x09l_suppkey\x0A\x0cl_linenumber\x0A\x0Al_quantity\x0A\x0fl_extendedprice\x0A\x0Al_discount\x0A\x05l_tax\x0A\x0cl_returnflag\x0A\x0cl_linestatus\x0A\x0Al_shipdate\x0A\x0cl_commitdate\x0A\x0Dl_receiptdate\x0A\x0el_shipinstruct\x0A\x0Al_shipmode\x0A\x09l_comment\x12l\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08lineitem\x1a\xb5\x01\x1a\xb2\x01\x08\x0b\x12\x89\x01\x1a\x86\x01\x08\x0b\x12J\x1aH\x08\x0b\x12\x1e\x1a\x1c\x08\x12\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0A"\x00\x12\x06\x0A\x04\x80\x01\xbeD\x1a\x04\x0A\x02\x10\x01\x12\x1e\x1a\x1c\x08\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0A"\x00\x12\x06\x0A\x04\x80\x01\xabG\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x120\x1a.\x08\x13\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x06"\x00\x12\x0b\x0A\x09Y\x9a\x99\x99\x99\x99\x99\xa9?\x12\x0b\x0A\x09Y\xecQ\xb8\x1e\x85\xeb\xb1?\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1c\x1a\x1a\x08\x14\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x04\x0A\x02(\x18\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01"6\x0A4\x08\x05\x12%\x1a#\x08\x06\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x06"\x00\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f \x03*\x07\xc2\x01\x04\x08\x02\x10&\x12\x07revenue'
elif query_number == 9:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x1a\xfd\x0c\x12\xfa\x0c\x0A\xdb\x0c*\xd8\x0c\x12\xb7\x0c"\xb4\x0c\x12\xfa\x0b\x12\xf7\x0b\x12\xcf\x0b:\xcc\x0b\x12\x9b\x0A2\x98\x0A\x12\x98\x092\x95\x09\x12\xa9\x072\xa6\x07\x12\xd6\x052\xd3\x05\x12\x85\x042\x82\x04\x12\xcc\x02\x0A\xc9\x02\x12\xba\x02\x0A\x0Al_orderkey\x0A\x09l_partkey\x0A\x09l_suppkey\x0A\x0cl_linenumber\x0A\x0Al_quantity\x0A\x0fl_extendedprice\x0A\x0Al_discount\x0A\x05l_tax\x0A\x0cl_returnflag\x0A\x0cl_linestatus\x0A\x0Al_shipdate\x0A\x0cl_commitdate\x0A\x0Dl_receiptdate\x0A\x0el_shipinstruct\x0A\x0Al_shipmode\x0A\x09l_comment\x12l\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08lineitem\x1a\x8a\x01\x0A\x87\x01\x12y\x0A\x09s_suppkey\x0A\x06s_name\x0A\x09s_address\x0A\x0bs_nationkey\x0A\x07s_phone\x0A\x09s_acctbal\x0A\x09s_comment\x12-\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x0A\x0A\x08supplier""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x10"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x02"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1as\x0Aq\x12c\x0A\x0Aps_partkey\x0A\x0Aps_suppkey\x0A\x0bps_availqty\x0A\x0Dps_supplycost\x0A\x0Aps_comment\x12!\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x0A\x0A\x08partsupp"R\x1aP\x08\x0b\x12"\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x18"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x02"\x00\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x17"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x010\x01\x1a\xa4\x01\x0A\xa1\x01\x12\x96\x01\x0A\x09p_partkey\x0A\x06p_name\x0A\x06p_mfgr\x0A\x07p_brand\x0A\x06p_type\x0A\x06p_size\x0A\x0bp_container\x0A\x0Dp_retailprice\x0A\x09p_comment\x129\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x06\x0A\x04part""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x1c"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\xc2\x01\x0A\xbf\x01\x12\xb2\x01\x0A\x0Ao_orderkey\x0A\x09o_custkey\x0A\x0Do_orderstatus\x0A\x0co_totalprice\x0A\x0bo_orderdate\x0A\x0fo_orderpriority\x0A\x07o_clerk\x0A\x0eo_shippriority\x0A\x09o_comment\x12:\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06orders" \x1a\x1e\x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08%"\x00\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1aU\x0AS\x12G\x0A\x0bn_nationkey\x0A\x06n_name\x0A\x0bn_regionkey\x0A\x09n_comment\x12\x18\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06nation""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x13"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08."\x00\x1a\x04\x0A\x02\x10\x010\x01\x1ap\x1an\x08\x15\x12:\x1a8\x08\x06\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x12\x1f\x1a\x1d\x08\x07\x12\x04\x0A\x02\x10\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x06"\x00\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x12%\x1a#\x08\x16\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x1a"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x1a"\x1a \x08\x17\x12\x16\x1a\x14\x08\x18\x12\x0A\x12\x08\x0A\x04\x12\x02\x08)"\x00\x1a\x04*\x02\x10\x01\x1a\x04b\x02\x10\x01\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08/"\x00\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08\x1d"\x00\x1a#\x1a!\x08\x0c\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x03"\x00\x12\x0b\x0A\x09b\x07%green%\x1a\x04\x0A\x02\x10\x01\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x02"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00"\x19\x0A\x17\x08\x05\x12\x08\x12\x06\x0A\x02\x12\x00"\x00 \x03*\x07\xc2\x01\x04\x08\x02\x10&\x1a\x0c\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00\x10\x01\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x10\x03\x12\x06nation\x12\x06o_year\x12\x0Asum_profit'
elif query_number == 10:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x1a\xae\x0A\x12\xab\x0A\x0A\xda\x09\x1a\xd7\x09\x12\xd2\x09*\xcf\x09\x12\xbc\x09"\xb9\x09\x12\x89\x08\x12\x86\x08\x12\x8c\x072\x89\x07\x12\x89\x062\x86\x06\x12\x8e\x032\x8b\x03\x12\x9f\x01\x0A\x9c\x01\x12\x8d\x01\x0A\x09c_custkey\x0A\x06c_name\x0A\x09c_address\x0A\x0bc_nationkey\x0A\x07c_phone\x0A\x09c_acctbal\x0A\x0cc_mktsegment\x0A\x09c_comment\x123\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08customer\x1a\xc2\x01\x0A\xbf\x01\x12\xb2\x01\x0A\x0Ao_orderkey\x0A\x09o_custkey\x0A\x0Do_orderstatus\x0A\x0co_totalprice\x0A\x0bo_orderdate\x0A\x0fo_orderpriority\x0A\x07o_clerk\x0A\x0eo_shippriority\x0A\x09o_comment\x12:\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06orders" \x1a\x1e\x08\x0A\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x09"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\xcc\x02\x0A\xc9\x02\x12\xba\x02\x0A\x0Al_orderkey\x0A\x09l_partkey\x0A\x09l_suppkey\x0A\x0cl_linenumber\x0A\x0Al_quantity\x0A\x0fl_extendedprice\x0A\x0Al_discount\x0A\x05l_tax\x0A\x0cl_returnflag\x0A\x0cl_linestatus\x0A\x0Al_shipdate\x0A\x0cl_commitdate\x0A\x0Dl_receiptdate\x0A\x0el_shipinstruct\x0A\x0Al_shipmode\x0A\x09l_comment\x12l\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08lineitem""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x11"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1aU\x0AS\x12G\x0A\x0bn_nationkey\x0A\x06n_name\x0A\x0bn_regionkey\x0A\x09n_comment\x12\x18\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06nation""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x03"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08!"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1au\x1as\x08\x0b\x12J\x1aH\x08\x0b\x12\x1e\x1a\x1c\x08\x12\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0c"\x00\x12\x06\x0A\x04\x80\x01\xe2C\x1a\x04\x0A\x02\x10\x01\x12\x1e\x1a\x1c\x08\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0c"\x00\x12\x06\x0A\x04\x80\x01\xbeD\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1d\x1a\x1b\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x19"\x00\x12\x05\x0A\x03b\x01R\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x0A\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08""\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x02"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x07"\x00"K\x0AI\x08\x05\x12:\x1a8\x08\x06\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x16"\x00\x12\x1f\x1a\x1d\x08\x07\x12\x04\x0A\x02\x10\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x17"\x00\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f \x03*\x07\xc2\x01\x04\x08\x02\x10&\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x07"\x00\x10\x03 \x14\x12\x09c_custkey\x12\x06c_name\x12\x09c_acctbal\x12\x07c_phone\x12\x06n_name\x12\x09c_address\x12\x09c_comment\x12\x07revenue'
elif query_number == 11:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x12\x0f\x1a\x0D\x08\x01\x10\x19\x1a\x07greater\x12\x10\x1a\x0e\x08\x01\x10\x1a\x1a\x08multiply\x1a\x96\x05\x12\x93\x05\x0A\xfd\x04*\xfa\x04\x12\xe7\x04\x12\xe4\x04\x12\xa3\x04"\xa0\x04\x12\xd9\x03\x12\xd6\x03\x12\xae\x032\xab\x03\x12\xab\x022\xa8\x02\x12s\x0Aq\x12c\x0A\x0Aps_partkey\x0A\x0Aps_suppkey\x0A\x0bps_availqty\x0A\x0Dps_supplycost\x0A\x0Aps_comment\x12!\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x0A\x0A\x08partsupp\x1a\x8a\x01\x0A\x87\x01\x12y\x0A\x09s_suppkey\x0A\x06s_name\x0A\x09s_address\x0A\x0bs_nationkey\x0A\x07s_phone\x0A\x09s_acctbal\x0A\x09s_comment\x12-\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x0A\x0A\x08supplier""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1aU\x0AS\x12G\x0A\x0bn_nationkey\x0A\x06n_name\x0A\x0bn_regionkey\x0A\x09n_comment\x12\x18\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06nation""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0c"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a#\x1a!\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0D"\x00\x12\x0b\x0A\x09b\x07GERMANY\x1a\x04\x0A\x02\x10\x01\x1a\x0A\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00"6\x0A4\x08\x05\x12%\x1a#\x08\x16\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x03"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x02"\x00\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f \x03*\x07\xc2\x01\x04\x08\x02\x10&\x1a<\x1a:\x08\x19\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x12$\x1a"\x08\x1a\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x0b\x0A\x09Y-C\x1c\xeb\xe26\x1a?\x1a\x07\xc2\x01\x04\x08\x02\x10&\x1a\x04\x0A\x02\x10\x01\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x10\x03\x12\x0Aps_partkey\x12\x05value'
elif query_number == 12:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x12\x0f\x1a\x0D\x08\x01\x10\x19\x1a\x07greater\x12\x10\x1a\x0e\x08\x01\x10\x1a\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x1b\x1a\x08contains\x12\x12\x1a\x10\x08\x01\x10\x1c\x1a\x0Avalue_list\x12\x0b\x1a\x09\x08\x01\x10\x1d\x1a\x03sum\x12\x13\x1a\x11\x08\x01\x10\x1e\x1a\x0bsimple_case\x1a\x86\x08\x12\x83\x08\x0A\xd3\x07*\xd0\x07\x12\xbf\x07"\xbc\x07\x12\xa3\x06\x12\xa0\x06\x12\xbb\x042\xb8\x04\x12\xc2\x01\x0A\xbf\x01\x12\xb2\x01\x0A\x0Ao_orderkey\x0A\x09o_custkey\x0A\x0Do_orderstatus\x0A\x0co_totalprice\x0A\x0bo_orderdate\x0A\x0fo_orderpriority\x0A\x07o_clerk\x0A\x0eo_shippriority\x0A\x09o_comment\x12:\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06orders\x1a\xcc\x02\x0A\xc9\x02\x12\xba\x02\x0A\x0Al_orderkey\x0A\x09l_partkey\x0A\x09l_suppkey\x0A\x0cl_linenumber\x0A\x0Al_quantity\x0A\x0fl_extendedprice\x0A\x0Al_discount\x0A\x05l_tax\x0A\x0cl_returnflag\x0A\x0cl_linestatus\x0A\x0Al_shipdate\x0A\x0cl_commitdate\x0A\x0Dl_receiptdate\x0A\x0el_shipinstruct\x0A\x0Al_shipmode\x0A\x09l_comment\x12l\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08lineitem" \x1a\x1e\x08\x0A\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x09"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\xdf\x01\x1a\xdc\x01\x08\x0b\x12\xb1\x01\x1a\xae\x01\x08\x0b\x12\x83\x01\x1a\x80\x01\x08\x0b\x12R\x1aP\x08\x0b\x12"\x1a \x08\x1b\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x17"\x00\x12\x0A\x1a\x08\x08\x1c\x1a\x04b\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x14"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x15"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x13"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x14"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1e\x1a\x1c\x08\x12\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x15"\x00\x12\x06\x0A\x04\x80\x01\xbeD\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1e\x1a\x1c\x08\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x15"\x00\x12\x06\x0A\x04\x80\x01\xabG\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x17"\x00"B\x0A@\x08\x1d\x124\x1a2\x08\x1e\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x12\x0A\x1a\x08\x08\x1c\x1a\x04b\x02\x10\x01\x12\x0A\x1a\x08\x08\x1c\x1a\x04\x12\x02\x10\x01\x12\x04\x0A\x02\x10\x00\x1a\x04\x12\x02\x10\x01 \x03*\x04:\x02\x10\x01"B\x0A@\x08\x1d\x124\x1a2\x08\x1e\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x12\x0A\x1a\x08\x08\x1c\x1a\x04b\x02\x10\x01\x12\x0A\x1a\x08\x08\x1c\x1a\x04\x12\x02\x10\x01\x12\x04\x0A\x02\x10\x01\x1a\x04\x12\x02\x10\x01 \x03*\x04:\x02\x10\x01\x1a\x0c\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00\x10\x01\x12\x0Al_shipmode\x12\x0fhigh_line_count\x12\x0elow_line_count'
elif query_number == 13:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x12\x0f\x1a\x0D\x08\x01\x10\x19\x1a\x07greater\x12\x10\x1a\x0e\x08\x01\x10\x1a\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x1b\x1a\x08contains\x12\x12\x1a\x10\x08\x01\x10\x1c\x1a\x0Avalue_list\x12\x0b\x1a\x09\x08\x01\x10\x1d\x1a\x03sum\x12\x13\x1a\x11\x08\x01\x10\x1e\x1a\x0bsimple_case\x12\x0b\x1a\x09\x08\x01\x10\x1f\x1a\x03not\x12\x0D\x1a\x0b\x08\x01\x10 \x1a\x05count\x1a\xe1\x04\x12\xde\x04\x0A\xc8\x04*\xc5\x04\x12\xa4\x04"\xa1\x04\x12\x82\x04"\xff\x03\x12\xd6\x032\xd3\x03\x12\x9f\x01\x0A\x9c\x01\x12\x8d\x01\x0A\x09c_custkey\x0A\x06c_name\x0A\x09c_address\x0A\x0bc_nationkey\x0A\x07c_phone\x0A\x09c_acctbal\x0A\x0cc_mktsegment\x0A\x09c_comment\x123\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08customer\x1a\xc2\x01\x0A\xbf\x01\x12\xb2\x01\x0A\x0Ao_orderkey\x0A\x09o_custkey\x0A\x0Do_orderstatus\x0A\x0co_totalprice\x0A\x0bo_orderdate\x0A\x0fo_orderpriority\x0A\x07o_clerk\x0A\x0eo_shippriority\x0A\x09o_comment\x12:\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06orders"h\x1af\x08\x0b\x12 \x1a\x1e\x08\x0A\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x1a\x04\x0A\x02\x10\x01\x12:\x1a8\x08\x1f\x12.\x1a,\x08\x0c\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x12\x16\x0A\x14b\x12%special%requests%\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x010\x03\x1a\x0A\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00"\x18\x0A\x16\x08 \x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00 \x03*\x04:\x02\x10\x01\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00"\x0c\x0A\x0A\x08\x04 \x03*\x04:\x02\x10\x01\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x10\x03\x1a\x0c\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00\x10\x03\x12\x07c_count\x12\x08custdist'
elif query_number == 16:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x12\x0f\x1a\x0D\x08\x01\x10\x19\x1a\x07greater\x12\x10\x1a\x0e\x08\x01\x10\x1a\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x1b\x1a\x08contains\x12\x12\x1a\x10\x08\x01\x10\x1c\x1a\x0Avalue_list\x12\x0b\x1a\x09\x08\x01\x10\x1d\x1a\x03sum\x12\x13\x1a\x11\x08\x01\x10\x1e\x1a\x0bsimple_case\x12\x0b\x1a\x09\x08\x01\x10\x1f\x1a\x03not\x12\x0D\x1a\x0b\x08\x01\x10 \x1a\x05count\x12\x0e\x1a\x0c\x08\x01\x10!\x1a\x06divide\x12\x10\x1a\x0e\x08\x01\x10"\x1a\x08multiply\x12\x15\x1a\x13\x08\x01\x10#\x1a\x0Dsearched_case\x12\x12\x1a\x10\x08\x01\x10$\x1a\x0Anot_equals\x12\x10\x1a\x0e\x08\x01\x10%\x1a\x08contains\x12\x10\x1a\x0e\x08\x01\x10&\x1a\x08contains\x12\x16\x1a\x14\x08\x01\x10\'\x1a\x0ecount_distinct\x1a\xde\x05\x12\xdb\x05\x0A\xb1\x05*\xae\x05\x12\xed\x04"\xea\x04\x12\xa3\x04\x12\xa0\x04\x12\xc3\x022\xc0\x02\x12s\x0Aq\x12c\x0A\x0Aps_partkey\x0A\x0Aps_suppkey\x0A\x0bps_availqty\x0A\x0Dps_supplycost\x0A\x0Aps_comment\x12!\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x0A\x0A\x08partsupp\x1a\xa4\x01\x0A\xa1\x01\x12\x96\x01\x0A\x09p_partkey\x0A\x06p_name\x0A\x06p_mfgr\x0A\x07p_brand\x0A\x06p_type\x0A\x06p_size\x0A\x0bp_container\x0A\x0Dp_retailprice\x0A\x09p_comment\x129\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x06\x0A\x04part" \x1a\x1e\x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\xd7\x01\x1a\xd4\x01\x08\x0b\x12\x9b\x01\x1a\x98\x01\x08\x0b\x12j\x1ah\x08\x0b\x12$\x1a"\x08$\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x12\x0c\x0A\x0Ab\x08Brand#45\x1a\x04\x0A\x02\x10\x01\x128\x1a6\x08\x1f\x12,\x1a*\x08\x0c\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x09"\x00\x12\x14\x0A\x12b\x10MEDIUM POLISHED%\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08%\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0A"\x00\x12\x0A\x1a\x08\x08\x1c\x1a\x04\x12\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12,\x1a*\x08\x1f\x12 \x1a\x1e\x08&\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x09"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x0A"\x00"\x18\x0A\x16\x08\'\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00 \x03*\x04:\x02\x10\x01\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x03"\x00\x10\x03\x1a\x0c\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00\x10\x01\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x10\x01\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x02"\x00\x10\x01\x12\x07p_brand\x12\x06p_type\x12\x06p_size\x12\x0csupplier_cnt'
elif query_number == 18:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x12\x0f\x1a\x0D\x08\x01\x10\x19\x1a\x07greater\x12\x10\x1a\x0e\x08\x01\x10\x1a\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x1b\x1a\x08contains\x12\x12\x1a\x10\x08\x01\x10\x1c\x1a\x0Avalue_list\x12\x0b\x1a\x09\x08\x01\x10\x1d\x1a\x03sum\x12\x13\x1a\x11\x08\x01\x10\x1e\x1a\x0bsimple_case\x12\x0b\x1a\x09\x08\x01\x10\x1f\x1a\x03not\x12\x0D\x1a\x0b\x08\x01\x10 \x1a\x05count\x12\x0e\x1a\x0c\x08\x01\x10!\x1a\x06divide\x12\x10\x1a\x0e\x08\x01\x10"\x1a\x08multiply\x12\x15\x1a\x13\x08\x01\x10#\x1a\x0Dsearched_case\x12\x12\x1a\x10\x08\x01\x10$\x1a\x0Anot_equals\x12\x10\x1a\x0e\x08\x01\x10%\x1a\x08contains\x12\x10\x1a\x0e\x08\x01\x10&\x1a\x08contains\x12\x16\x1a\x14\x08\x01\x10\'\x1a\x0ecount_distinct\x12\x0c\x1a\x0A\x08\x01\x10(\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10)\x1a\x08multiply\x1a\x8c\x08\x12\x89\x08\x0A\xc3\x07\x1a\xc0\x07\x12\xbb\x07*\xb8\x07\x12\x95\x07"\x92\x07\x12\xb1\x06\x12\xae\x06\x12\x89\x062\x86\x06\x12\x8e\x032\x8b\x03\x12\x9f\x01\x0A\x9c\x01\x12\x8d\x01\x0A\x09c_custkey\x0A\x06c_name\x0A\x09c_address\x0A\x0bc_nationkey\x0A\x07c_phone\x0A\x09c_acctbal\x0A\x0cc_mktsegment\x0A\x09c_comment\x123\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08customer\x1a\xc2\x01\x0A\xbf\x01\x12\xb2\x01\x0A\x0Ao_orderkey\x0A\x09o_custkey\x0A\x0Do_orderstatus\x0A\x0co_totalprice\x0A\x0bo_orderdate\x0A\x0fo_orderpriority\x0A\x07o_clerk\x0A\x0eo_shippriority\x0A\x09o_comment\x12:\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06orders" \x1a\x1e\x08\x0A\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x09"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\xcc\x02\x0A\xc9\x02\x12\xba\x02\x0A\x0Al_orderkey\x0A\x09l_partkey\x0A\x09l_suppkey\x0A\x0cl_linenumber\x0A\x0Al_quantity\x0A\x0fl_extendedprice\x0A\x0Al_discount\x0A\x05l_tax\x0A\x0cl_returnflag\x0A\x0cl_linestatus\x0A\x0Al_shipdate\x0A\x0cl_commitdate\x0A\x0Dl_receiptdate\x0A\x0el_shipinstruct\x0A\x0Al_shipmode\x0A\x09l_comment\x12l\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08lineitem""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x11"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a \x1a\x1e\x08&\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x1a\x0A\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x0c"\x00\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x0b"\x00"\x18\x0A\x16\x08\x09\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x15"\x00 \x03*\x04:\x02\x10\x01\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x10\x03\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x03"\x00\x10\x01 d\x12\x06c_name\x12\x09c_custkey\x12\x0Ao_orderkey\x12\x0bo_orderdate\x12\x0co_totalprice\x12\x07sum_qty'
elif query_number == 19:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x12\x0f\x1a\x0D\x08\x01\x10\x19\x1a\x07greater\x12\x10\x1a\x0e\x08\x01\x10\x1a\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x1b\x1a\x08contains\x12\x12\x1a\x10\x08\x01\x10\x1c\x1a\x0Avalue_list\x12\x0b\x1a\x09\x08\x01\x10\x1d\x1a\x03sum\x12\x13\x1a\x11\x08\x01\x10\x1e\x1a\x0bsimple_case\x12\x0b\x1a\x09\x08\x01\x10\x1f\x1a\x03not\x12\x0D\x1a\x0b\x08\x01\x10 \x1a\x05count\x12\x0e\x1a\x0c\x08\x01\x10!\x1a\x06divide\x12\x10\x1a\x0e\x08\x01\x10"\x1a\x08multiply\x12\x15\x1a\x13\x08\x01\x10#\x1a\x0Dsearched_case\x12\x12\x1a\x10\x08\x01\x10$\x1a\x0Anot_equals\x12\x10\x1a\x0e\x08\x01\x10%\x1a\x08contains\x12\x10\x1a\x0e\x08\x01\x10&\x1a\x08contains\x12\x16\x1a\x14\x08\x01\x10\'\x1a\x0ecount_distinct\x12\x0c\x1a\x0A\x08\x01\x10(\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10)\x1a\x08multiply\x12\x0A\x1a\x08\x08\x01\x10*\x1a\x02or\x12\x15\x1a\x13\x08\x01\x10+\x1a\x0Dgreater_equal\x12\x12\x1a\x10\x08\x01\x10,\x1a\x0Aless_equal\x12\x0f\x1a\x0D\x08\x01\x10-\x1a\x07between\x1a\x8a\x0D\x12\x87\x0D\x0A\xfb\x0c"\xf8\x0c\x12\xa8\x0c\x12\xa5\x0c\x12\x9f\x042\x9c\x04\x12\xcc\x02\x0A\xc9\x02\x12\xba\x02\x0A\x0Al_orderkey\x0A\x09l_partkey\x0A\x09l_suppkey\x0A\x0cl_linenumber\x0A\x0Al_quantity\x0A\x0fl_extendedprice\x0A\x0Al_discount\x0A\x05l_tax\x0A\x0cl_returnflag\x0A\x0cl_linestatus\x0A\x0Al_shipdate\x0A\x0cl_commitdate\x0A\x0Dl_receiptdate\x0A\x0el_shipinstruct\x0A\x0Al_shipmode\x0A\x09l_comment\x12l\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08lineitem\x1a\xa4\x01\x0A\xa1\x01\x12\x96\x01\x0A\x09p_partkey\x0A\x06p_name\x0A\x06p_mfgr\x0A\x07p_brand\x0A\x06p_type\x0A\x06p_size\x0A\x0bp_container\x0A\x0Dp_retailprice\x0A\x09p_comment\x129\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x06\x0A\x04part""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x10"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\x80\x08\x1a\xfd\x07\x08*\x12\xa5\x05\x1a\xa2\x05\x08*\x12\xca\x02\x1a\xc7\x02\x08\x0b\x12\x8d\x02\x1a\x8a\x02\x08\x0b\x12\xdb\x01\x1a\xd8\x01\x08\x0b\x12\xa9\x01\x1a\xa6\x01\x08\x0b\x12~\x1a|\x08\x0b\x12T\x1aR\x08\x0b\x12$\x1a"\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x13"\x00\x12\x0c\x0A\x0Ab\x08Brand#12\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x1b\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x16"\x00\x12\x0A\x1a\x08\x08\x1c\x1a\x04b\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1c\x1a\x1a\x08+\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x04\x0A\x02(\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1c\x1a\x1a\x08,\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x04\x0A\x02(\x0b\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08-\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x15"\x00\x12\x04\x0A\x02\x10\x01\x12\x04\x0A\x02\x10\x05\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x1b\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0e"\x00\x12\x0A\x1a\x08\x08\x1c\x1a\x04b\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12-\x1a+\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0D"\x00\x12\x15\x0A\x13b\x11deliver in person\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\xca\x02\x1a\xc7\x02\x08\x0b\x12\x8d\x02\x1a\x8a\x02\x08\x0b\x12\xdb\x01\x1a\xd8\x01\x08\x0b\x12\xa9\x01\x1a\xa6\x01\x08\x0b\x12~\x1a|\x08\x0b\x12T\x1aR\x08\x0b\x12$\x1a"\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x13"\x00\x12\x0c\x0A\x0Ab\x08Brand#23\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x1b\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x16"\x00\x12\x0A\x1a\x08\x08\x1c\x1a\x04b\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1c\x1a\x1a\x08+\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x04\x0A\x02(\x0A\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1c\x1a\x1a\x08,\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x04\x0A\x02(\x14\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08-\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x15"\x00\x12\x04\x0A\x02\x10\x01\x12\x04\x0A\x02\x10\x0A\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x1b\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0e"\x00\x12\x0A\x1a\x08\x08\x1c\x1a\x04b\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12-\x1a+\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0D"\x00\x12\x15\x0A\x13b\x11deliver in person\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\xca\x02\x1a\xc7\x02\x08\x0b\x12\x8d\x02\x1a\x8a\x02\x08\x0b\x12\xdb\x01\x1a\xd8\x01\x08\x0b\x12\xa9\x01\x1a\xa6\x01\x08\x0b\x12~\x1a|\x08\x0b\x12T\x1aR\x08\x0b\x12$\x1a"\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x13"\x00\x12\x0c\x0A\x0Ab\x08Brand#34\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x1b\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x16"\x00\x12\x0A\x1a\x08\x08\x1c\x1a\x04b\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1c\x1a\x1a\x08+\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x04\x0A\x02(\x14\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x1c\x1a\x1a\x08,\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x04\x0A\x02(\x1e\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08-\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x15"\x00\x12\x04\x0A\x02\x10\x01\x12\x04\x0A\x02\x10\x0f\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x1b\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0e"\x00\x12\x0A\x1a\x08\x08\x1c\x1a\x04b\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12-\x1a+\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0D"\x00\x12\x15\x0A\x13b\x11deliver in person\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01"K\x0AI\x08\x05\x12:\x1a8\x08\x06\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x12\x1f\x1a\x1d\x08\x07\x12\x04\x0A\x02\x10\x01\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x06"\x00\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f\x1a\x07\xc2\x01\x04\x08\x02\x10\x0f \x03*\x07\xc2\x01\x04\x08\x02\x10&\x12\x07revenue'
elif query_number == 20:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x12\x0f\x1a\x0D\x08\x01\x10\x19\x1a\x07greater\x12\x10\x1a\x0e\x08\x01\x10\x1a\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x1b\x1a\x08contains\x12\x12\x1a\x10\x08\x01\x10\x1c\x1a\x0Avalue_list\x12\x0b\x1a\x09\x08\x01\x10\x1d\x1a\x03sum\x12\x13\x1a\x11\x08\x01\x10\x1e\x1a\x0bsimple_case\x12\x0b\x1a\x09\x08\x01\x10\x1f\x1a\x03not\x12\x0D\x1a\x0b\x08\x01\x10 \x1a\x05count\x12\x0e\x1a\x0c\x08\x01\x10!\x1a\x06divide\x12\x10\x1a\x0e\x08\x01\x10"\x1a\x08multiply\x12\x15\x1a\x13\x08\x01\x10#\x1a\x0Dsearched_case\x12\x12\x1a\x10\x08\x01\x10$\x1a\x0Anot_equals\x12\x10\x1a\x0e\x08\x01\x10%\x1a\x08contains\x12\x10\x1a\x0e\x08\x01\x10&\x1a\x08contains\x12\x16\x1a\x14\x08\x01\x10\'\x1a\x0ecount_distinct\x12\x0c\x1a\x0A\x08\x01\x10(\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10)\x1a\x08multiply\x12\x0A\x1a\x08\x08\x01\x10*\x1a\x02or\x12\x15\x1a\x13\x08\x01\x10+\x1a\x0Dgreater_equal\x12\x12\x1a\x10\x08\x01\x10,\x1a\x0Aless_equal\x12\x0f\x1a\x0D\x08\x01\x10-\x1a\x07between\x1a\xb0\x03\x12\xad\x03\x0A\x97\x03*\x94\x03\x12\x83\x03:\x80\x03\x12\xe5\x02\x12\xe2\x02\x12\x8d\x022\x8a\x02\x12\x8a\x01\x0A\x87\x01\x12y\x0A\x09s_suppkey\x0A\x06s_name\x0A\x09s_address\x0A\x0bs_nationkey\x0A\x07s_phone\x0A\x09s_acctbal\x0A\x09s_comment\x12-\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x0A\x0A\x08supplier\x1aU\x0AS\x12G\x0A\x0bn_nationkey\x0A\x06n_name\x0A\x0bn_regionkey\x0A\x09n_comment\x12\x18\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06nation""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x03"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x07"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1aP\x1aN\x08\x0b\x12"\x1a \x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x08"\x00\x12\x0A\x0A\x08b\x06CANADA\x1a\x04\x0A\x02\x10\x01\x12 \x1a\x1e\x08&\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08\x02"\x00\x1a\x0c\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00\x10\x01\x12\x06s_name\x12\x09s_address'
elif query_number == 21:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x12\x0f\x1a\x0D\x08\x01\x10\x19\x1a\x07greater\x12\x10\x1a\x0e\x08\x01\x10\x1a\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x1b\x1a\x08contains\x12\x12\x1a\x10\x08\x01\x10\x1c\x1a\x0Avalue_list\x12\x0b\x1a\x09\x08\x01\x10\x1d\x1a\x03sum\x12\x13\x1a\x11\x08\x01\x10\x1e\x1a\x0bsimple_case\x12\x0b\x1a\x09\x08\x01\x10\x1f\x1a\x03not\x12\x0D\x1a\x0b\x08\x01\x10 \x1a\x05count\x12\x0e\x1a\x0c\x08\x01\x10!\x1a\x06divide\x12\x10\x1a\x0e\x08\x01\x10"\x1a\x08multiply\x12\x15\x1a\x13\x08\x01\x10#\x1a\x0Dsearched_case\x12\x12\x1a\x10\x08\x01\x10$\x1a\x0Anot_equals\x12\x10\x1a\x0e\x08\x01\x10%\x1a\x08contains\x12\x10\x1a\x0e\x08\x01\x10&\x1a\x08contains\x12\x16\x1a\x14\x08\x01\x10\'\x1a\x0ecount_distinct\x12\x0c\x1a\x0A\x08\x01\x10(\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10)\x1a\x08multiply\x12\x0A\x1a\x08\x08\x01\x10*\x1a\x02or\x12\x15\x1a\x13\x08\x01\x10+\x1a\x0Dgreater_equal\x12\x12\x1a\x10\x08\x01\x10,\x1a\x0Aless_equal\x12\x0f\x1a\x0D\x08\x01\x10-\x1a\x07between\x12\x12\x1a\x10\x08\x01\x10.\x1a\x0Anot_equals\x1a\xd7\x0b\x12\xd4\x0b\x0A\xc0\x0b\x1a\xbd\x0b\x12\xb8\x0b*\xb5\x0b\x12\x94\x0b"\x91\x0b\x12\xf2\x0A\x12\xef\x0A\x12\xd1\x07:\xce\x07\x12\xf7\x062\xf4\x06\x12\xf4\x052\xf1\x05\x12\x83\x042\x80\x04\x12\x8a\x01\x0A\x87\x01\x12y\x0A\x09s_suppkey\x0A\x06s_name\x0A\x09s_address\x0A\x0bs_nationkey\x0A\x07s_phone\x0A\x09s_acctbal\x0A\x09s_comment\x12-\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01:\x0A\x0A\x08supplier\x1a\xcc\x02\x0A\xc9\x02\x12\xba\x02\x0A\x0Al_orderkey\x0A\x09l_partkey\x0A\x09l_suppkey\x0A\x0cl_linenumber\x0A\x0Al_quantity\x0A\x0fl_extendedprice\x0A\x0Al_discount\x0A\x05l_tax\x0A\x0cl_returnflag\x0A\x0cl_linestatus\x0A\x0Al_shipdate\x0A\x0cl_commitdate\x0A\x0Dl_receiptdate\x0A\x0el_shipinstruct\x0A\x0Al_shipmode\x0A\x09l_comment\x12l\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08lineitem" \x1a\x1e\x08\x0A\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x09"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\xc2\x01\x0A\xbf\x01\x12\xb2\x01\x0A\x0Ao_orderkey\x0A\x09o_custkey\x0A\x0Do_orderstatus\x0A\x0co_totalprice\x0A\x0bo_orderdate\x0A\x0fo_orderpriority\x0A\x07o_clerk\x0A\x0eo_shippriority\x0A\x09o_comment\x12:\x0A\x04*\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x05\x82\x01\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06orders""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x17"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x07"\x00\x1a\x04\x0A\x02\x10\x010\x01\x1aU\x0AS\x12G\x0A\x0bn_nationkey\x0A\x06n_name\x0A\x0bn_regionkey\x0A\x09n_comment\x12\x18\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01:\x08\x0A\x06nation""\x1a \x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x03"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08 "\x00\x1a\x04\x0A\x02\x10\x010\x01\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08\x07"\x00\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08\x19"\x00\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08\x13"\x00\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08\x12"\x00\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08\x09"\x00\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08!"\x00\x1a\x98\x03\x1a\x95\x03\x08\x0b\x12\xee\x01\x1a\xeb\x01\x08\x0b\x12\x84\x01\x1a\x81\x01\x08\x0b\x12M\x1aK\x08\x0b\x12\x1d\x1a\x1b\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x12\x05\x0A\x03b\x01F\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x10\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x02"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x03"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12(\x1a&\x08\x0D\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x06"\x00\x12\x10\x0A\x0eb\x0cSAUDI ARABIA\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12Z\x1aX\x08\x11\x12N\x1aL\x08\x0b\x12\x1e\x1a\x1c\x08\x0A\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08.\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x02"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12\x99\x01\x1a\x96\x01\x08\x1f\x12\x8b\x01\x1a\x88\x01\x08\x11\x12~\x1a|\x08\x0b\x12N\x1aL\x08\x0b\x12\x1e\x1a\x1c\x08\x0A\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08.\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x02"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12"\x1a \x08\x10\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0c"\x00\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x0b"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x0c\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00"\x0c\x0A\x0A\x08\x04 \x03*\x04:\x02\x10\x01\x1a\x0e\x0A\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x10\x03\x1a\x0c\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00\x10\x01 d\x12\x06s_name\x12\x07numwait'
elif query_number == 22:
return b'\x0A\x02\x08\x01\x12\x0c\x1a\x0A\x08\x01\x10\x01\x1a\x04less\x12\x0c\x1a\x0A\x08\x01\x10\x02\x1a\x04mean\x12\x0c\x1a\x0A\x08\x01\x10\x03\x1a\x04mean\x12\x0D\x1a\x0b\x08\x01\x10\x04\x1a\x05count\x12\x0b\x1a\x09\x08\x01\x10\x05\x1a\x03sum\x12\x10\x1a\x0e\x08\x01\x10\x06\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x07\x1a\x08subtract\x12\x0b\x1a\x09\x08\x01\x10\x08\x1a\x03add\x12\x0b\x1a\x09\x08\x01\x10\x09\x1a\x03sum\x12\x0e\x1a\x0c\x08\x01\x10\x0A\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0b\x1a\x03and\x12\x17\x1a\x15\x08\x01\x10\x0c\x1a\x0fstring_sql_like\x12\x0e\x1a\x0c\x08\x01\x10\x0D\x1a\x06equals\x12\x0e\x1a\x0c\x08\x01\x10\x0e\x1a\x06equals\x12\x0b\x1a\x09\x08\x01\x10\x0f\x1a\x03min\x12\x0f\x1a\x0D\x08\x01\x10\x10\x1a\x07greater\x12\x0b\x1a\x09\x08\x01\x10\x11\x1a\x03any\x12\x15\x1a\x13\x08\x01\x10\x12\x1a\x0Dgreater_equal\x12\x0f\x1a\x0D\x08\x01\x10\x13\x1a\x07between\x12\x0c\x1a\x0A\x08\x01\x10\x14\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10\x15\x1a\x08subtract\x12\x10\x1a\x0e\x08\x01\x10\x16\x1a\x08multiply\x12\x0c\x1a\x0A\x08\x01\x10\x17\x1a\x04cast\x12\x14\x1a\x12\x08\x01\x10\x18\x1a\x0cextract_year\x12\x0f\x1a\x0D\x08\x01\x10\x19\x1a\x07greater\x12\x10\x1a\x0e\x08\x01\x10\x1a\x1a\x08multiply\x12\x10\x1a\x0e\x08\x01\x10\x1b\x1a\x08contains\x12\x12\x1a\x10\x08\x01\x10\x1c\x1a\x0Avalue_list\x12\x0b\x1a\x09\x08\x01\x10\x1d\x1a\x03sum\x12\x13\x1a\x11\x08\x01\x10\x1e\x1a\x0bsimple_case\x12\x0b\x1a\x09\x08\x01\x10\x1f\x1a\x03not\x12\x0D\x1a\x0b\x08\x01\x10 \x1a\x05count\x12\x0e\x1a\x0c\x08\x01\x10!\x1a\x06divide\x12\x10\x1a\x0e\x08\x01\x10"\x1a\x08multiply\x12\x15\x1a\x13\x08\x01\x10#\x1a\x0Dsearched_case\x12\x12\x1a\x10\x08\x01\x10$\x1a\x0Anot_equals\x12\x10\x1a\x0e\x08\x01\x10%\x1a\x08contains\x12\x10\x1a\x0e\x08\x01\x10&\x1a\x08contains\x12\x16\x1a\x14\x08\x01\x10\'\x1a\x0ecount_distinct\x12\x0c\x1a\x0A\x08\x01\x10(\x1a\x04less\x12\x10\x1a\x0e\x08\x01\x10)\x1a\x08multiply\x12\x0A\x1a\x08\x08\x01\x10*\x1a\x02or\x12\x15\x1a\x13\x08\x01\x10+\x1a\x0Dgreater_equal\x12\x12\x1a\x10\x08\x01\x10,\x1a\x0Aless_equal\x12\x0f\x1a\x0D\x08\x01\x10-\x1a\x07between\x12\x12\x1a\x10\x08\x01\x10.\x1a\x0Anot_equals\x12\x11\x1a\x0f\x08\x01\x10/\x1a\x09substring\x12\x0f\x1a\x0D\x08\x01\x100\x1a\x07greater\x1a\x84\x04\x12\x81\x04\x0A\xde\x03*\xdb\x03\x12\xca\x03"\xc7\x03\x12\x8d\x03:\x8a\x03\x12\xd7\x02\x12\xd4\x02\x12\x9f\x01\x0A\x9c\x01\x12\x8d\x01\x0A\x09c_custkey\x0A\x06c_name\x0A\x09c_address\x0A\x0bc_nationkey\x0A\x07c_phone\x0A\x09c_acctbal\x0A\x0cc_mktsegment\x0A\x09c_comment\x123\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x04*\x02\x10\x01\x0A\x04b\x02\x10\x01\x0A\x07\xc2\x01\x04\x08\x02\x10\x0f\x0A\x04b\x02\x10\x01\x0A\x04b\x02\x10\x01:\x0A\x0A\x08customer\x1a\xaf\x01\x1a\xac\x01\x08\x0b\x12h\x1af\x08\x0b\x12:\x1a8\x08\x1b\x12"\x1a \x08/\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x04\x0A\x02\x10\x00\x12\x04\x0A\x02\x10\x02\x1a\x04b\x02\x10\x01\x12\x0A\x1a\x08\x08\x1c\x1a\x04b\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x12 \x1a\x1e\x080\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x128\x1a6\x08\x1f\x12,\x1a*\x08\x11\x12 \x1a\x1e\x08\x0A\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00\x12\x08\x12\x06\x0A\x02\x12\x00"\x00\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a\x04\x0A\x02\x10\x01\x1a"\x1a \x08/\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x04"\x00\x12\x04\x0A\x02\x10\x00\x12\x04\x0A\x02\x10\x02\x1a\x04b\x02\x10\x01\x1a\x0A\x12\x08\x0A\x04\x12\x02\x08\x05"\x00\x1a\x0A\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00"\x0c\x0A\x0A\x08\x04 \x03*\x04:\x02\x10\x01"\x1b\x0A\x19\x08\x05\x12\x0A\x12\x08\x0A\x04\x12\x02\x08\x01"\x00 \x03*\x07\xc2\x01\x04\x08\x02\x10&\x1a\x0c\x0A\x08\x12\x06\x0A\x02\x12\x00"\x00\x10\x01\x12\x09cntrycode\x12\x07numcust\x12\x0Atotacctbal'
else:
return 'nanana'
def execute_substrait(duckdb_cursor, query_number):
return
# proto_bytes = get_query_binary(query_number)
# result = duckdb_cursor.from_substrait(proto_bytes).df().sort_index(ascending=False, axis=1)
# query = duckdb_cursor.execute("select query from tpch_queries() where query_nr="+str(query_number)).fetchone()[0]
# answers = duckdb_cursor.execute(query).df().sort_index(ascending=False, axis=1)
# print (result)
# print (answers)
# assert result.equals(answers)
class TestTPCHIbisSubstrait(object):
# # Ibis has a < instead of <= in the filter
# def test_q01(self,duckdb_cursor):
# execute_substrait(duckdb_cursor,1)
def test_q03(self,duckdb_cursor):
execute_substrait(duckdb_cursor,3)
# # We are missing any
# def test_q04(self,duckdb_cursor):
# execute_substrait(duckdb_cursor,4)
# # We are missing any
# def test_q05(self,duckdb_cursor):
# execute_substrait(duckdb_cursor,5)
def test_q06(self,duckdb_cursor):
execute_substrait(duckdb_cursor,6)
# It seems that Ibis is exporting a cast function with only one child?
# def test_q09(self,duckdb_cursor):
# execute_substrait(duckdb_cursor,9)
def test_q10(self,duckdb_cursor):
execute_substrait(duckdb_cursor,10)
# # FAILED test_ibis_tpch.py::TestTPCHIbisSubstrait::test_q11 - We seem to be missing a sum aggregation somewhere
# def test_q11(self,duckdb_cursor):
# execute_substrait(duckdb_cursor,11)
# # FAILED test_ibis_tpch.py::TestTPCHIbisSubstrait::test_q12 - RuntimeError: Catalog Error: Scalar Function with name value_list does not exist!
def test_q12(self,duckdb_cursor):
execute_substrait(duckdb_cursor,12)
# # FAILED test_ibis_tpch.py::TestTPCHIbisSubstrait::test_q13 - RuntimeError: Catalog Error: Scalar Function with name not does not exist!
def test_q13(self,duckdb_cursor):
execute_substrait(duckdb_cursor,13)
# FAILED test_ibis_tpch.py::TestTPCHIbisSubstrait::test_q16 - RuntimeError: Scalar Function with name not does not exist!
def test_q16(self,duckdb_cursor):
execute_substrait(duckdb_cursor,16)
# FAILED test_ibis_tpch.py::TestTPCHIbisSubstrait::test_q18 - assert False
def test_q18(self,duckdb_cursor):
execute_substrait(duckdb_cursor,18)
# FAILED test_ibis_tpch.py::TestTPCHIbisSubstrait::test_q19 - RuntimeError: Catalog Error: Scalar Function with name value_list does not exist!
def test_q19(self,duckdb_cursor):
execute_substrait(duckdb_cursor,19)
# FAILED test_ibis_tpch.py::TestTPCHIbisSubstrait::test_q20 - assert False
def test_q20(self,duckdb_cursor):
execute_substrait(duckdb_cursor,20)
# FAILED test_ibis_tpch.py::TestTPCHIbisSubstrait::test_q21 - RuntimeError: Catalog Error: Scalar Function with name any does not exist!
def test_q21(self,duckdb_cursor):
execute_substrait(duckdb_cursor,21)
# FAILED test_ibis_tpch.py::TestTPCHIbisSubstrait::test_q22 - RuntimeError: Catalog Error: Scalar Function with name value_list does not exist!
def test_q22(self,duckdb_cursor):
execute_substrait(duckdb_cursor,22)
| 649.818182
| 7,401
| 0.767501
| 15,626
| 71,480
| 3.468066
| 0.031614
| 0.070195
| 0.084533
| 0.070416
| 0.931484
| 0.924731
| 0.92152
| 0.896977
| 0.89587
| 0.893822
| 0
| 0.341431
| 0.012927
| 71,480
| 110
| 7,402
| 649.818182
| 0.42664
| 0.02777
| 0
| 0
| 0
| 1.295082
| 0.841964
| 0.839877
| 0
| 1
| 0
| 0
| 0
| 1
| 0.213115
| false
| 0
| 0.016393
| 0.016393
| 0.540984
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|
698e7ef10363379a683464bf1e15626473faec8f
| 21,529
|
py
|
Python
|
src/atmos_flux_inversion/optimal_interpolation.py
|
DWesl/atmospheric-inverse-methods-for-flux-optimization
|
f8a3e8564dc3bf86df297a0683a2a52c657289d4
|
[
"BSD-3-Clause"
] | 4
|
2020-04-20T20:14:27.000Z
|
2022-02-28T16:49:58.000Z
|
src/atmos_flux_inversion/optimal_interpolation.py
|
DWesl/atmospheric-inverse-methods-for-flux-optimization
|
f8a3e8564dc3bf86df297a0683a2a52c657289d4
|
[
"BSD-3-Clause"
] | 6
|
2019-03-06T02:03:44.000Z
|
2020-08-04T17:07:12.000Z
|
src/atmos_flux_inversion/optimal_interpolation.py
|
DWesl/atmospheric-inverse-methods-for-flux-optimization
|
f8a3e8564dc3bf86df297a0683a2a52c657289d4
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T12:57:29.000Z
|
2019-01-31T12:57:29.000Z
|
"""Inversions using Optimal Interpolaiton.
Also known as Kalman Matrix Inversion or batch inversion.
"""
import scipy.linalg
from scipy.sparse.linalg import LinearOperator
from atmos_flux_inversion.util import method_common
from atmos_flux_inversion.linalg import (ProductLinearOperator, ARRAY_TYPES,
solve, tolinearoperator)
@method_common
def simple(background, background_covariance,
observations, observation_covariance,
observation_operator,
reduced_background_covariance,
reduced_observation_operator):
"""Solve the inversion problem using the equations literally.
Assumes all arrays fit in memory with room to spare. A direct
translation of the matrix inversion equations to Python.
Assumes everything follows a multivariate normal distribution
with the specified covariance matrices. Under this assumption
`analysis_covariance` is exact, and `analysis` is the Maximum
Likelihood Estimator and the Best Linear Unbiased Estimator
for the underlying state in the frequentist framework, and
specify the posterior distribution for the state in the
Bayesian framework. If these are not satisfied, these still
form the Generalized Least Squares estimates for the state and
an estimated uncertainty.
Parameters
----------
background: array_like[N]
The background state estimate.
background_covariance: array_like[N, N]
Covariance of background state estimate across
realizations/ensemble members. "Ensemble" is here
interpreted in the sense used in statistical mechanics or
frequentist statistics, and may not be derived from a
sample as in meteorological ensemble Kalman filters
observations: array_like[M]
The observations constraining the background estimate.
observation_covariance: array_like[M, M]
Covariance of observations across realizations/ensemble
members. "Ensemble" again has the statistical meaning.
observation_operator: array_like[M, N]
The relationship between the state and the observations.
reduced_background_covariance: array_like[Nred, Nred], optional
The covariance for a smaller state space, usually obtained by
reducing resolution in space and time. Note that
`reduced_observation_operator` must also be provided
reduced_observation_operator: array_like[M, Nred], optional
The relationship between the reduced state space and the
observations. Note that `reduced_background_covariance`
must also be provided.
Returns
-------
analysis: array_like[N]
Analysis state estimate
analysis_covariance: array_like[Nred, Nred] or array_like[N, N]
Estimated uncertainty of analysis across
realizations/ensemble members. Calculated using
reduced_background_covariance and
reduced_observation_operator if provided
"""
# \vec{y}_b = H \vec{x}_b
projected_obs = observation_operator.dot(background)
# \Delta\vec{y} = \vec{y} - \vec{y}_b
observation_increment = observations - projected_obs
# B_{proj} = HBH^T
if isinstance(observation_operator, LinearOperator):
projected_background_covariance = ProductLinearOperator(
observation_operator, background_covariance, observation_operator.T
)
else:
projected_background_covariance = observation_operator.dot(
background_covariance.dot(observation_operator.T))
if isinstance(observation_covariance, LinearOperator):
projected_background_covariance = tolinearoperator(
projected_background_covariance)
covariance_sum = projected_background_covariance + observation_covariance
# \Delta\vec{x} = B H^T (B_{proj} + R)^{-1} \Delta\vec{y}
analysis_increment = background_covariance.dot(
observation_operator.T.dot(
solve(
covariance_sum,
observation_increment)))
# \vec{x}_a = \vec{x}_b + \Delta\vec{x}
analysis = background + analysis_increment
# P_a = B - B H^T (B_{proj} + R)^{-1} H B
if reduced_background_covariance is None:
# The possibility that the arguments may be a mix of
# LinearOperators and arrays requires an odd evaluation order
# This is symmetric
# Calculate B H^T cov_sum^-1 H
most_of_decrease = background_covariance.dot(
observation_operator.T.dot(
solve(
covariance_sum,
observation_operator
)
)
)
# Transpose to get H^T cov_sum^-1 H B
# Then B * _ to get the decrease
decrease = background_covariance.dot(most_of_decrease.T)
if isinstance(background_covariance, LinearOperator):
decrease = tolinearoperator(decrease)
analysis_covariance = background_covariance - decrease
else:
# The possibility that the arguments may be a mix of
# LinearOperators and arrays requires an odd evaluation order
# This is symmetric
# Calculate B H^T cov_sum^-1 H
most_of_decrease = reduced_background_covariance.dot(
reduced_observation_operator.T.dot(
solve(
covariance_sum,
reduced_observation_operator
)
)
)
# Transpose to get H^T cov_sum^-1 H B
# Then B * _ to get the decrease
decrease = reduced_background_covariance.dot(most_of_decrease.T)
if isinstance(reduced_background_covariance, LinearOperator):
decrease = tolinearoperator(decrease)
analysis_covariance = reduced_background_covariance - decrease
return analysis, analysis_covariance
@method_common
def fold_common(background, background_covariance,
observations, observation_covariance,
observation_operator,
reduced_background_covariance,
reduced_observation_operator):
"""Solve the inversion problem, evaluating sub-expressions only once.
Assumes all arrays fit in memory with room to spare.
Assumes everything follows a multivariate normal distribution
with the specified covariance matrices. Under this assumption
`analysis_covariance` is exact, and `analysis` is the Maximum
Likelihood Estimator and the Best Linear Unbiased Estimator
for the underlying state in the frequentist framework, and
specify the posterior distribution for the state in the
Bayesian framework. If these are not satisfied, these still
form the Generalized Least Squares estimates for the state and
an estimated uncertainty.
Parameters
----------
background: array_like[N]
The background state estimate.
background_covariance: array_like[N, N]
Covariance of background state estimate across
realizations/ensemble members. "Ensemble" is here
interpreted in the sense used in statistical mechanics or
frequentist statistics, and may not be derived from a
sample as in meteorological ensemble Kalman filters
observations: array_like[M]
The observations constraining the background estimate.
observation_covariance: array_like[M, M]
Covariance of observations across realizations/ensemble
members. "Ensemble" again has the statistical meaning.
observation_operator: array_like[M, N]
The relationship between the state and the observations.
reduced_background_covariance: array_like[Nred, Nred], optional
The covariance for a smaller state space, usually obtained by
reducing resolution in space and time. Note that
`reduced_observation_operator` must also be provided
reduced_observation_operator: array_like[M, Nred], optional
The relationship between the reduced state space and the
observations. Note that `reduced_background_covariance`
must also be provided.
Returns
-------
analysis: array_like[N]
Analysis state estimate
analysis_covariance: array_like[Nred, Nred] or array_like[N, N]
Estimated uncertainty of analysis across
realizations/ensemble members. Calculated using
reduced_background_covariance and
reduced_observation_operator if possible
"""
# \vec{y}_b = H \vec{x}_b
projected_obs = observation_operator.dot(background)
# \Delta\vec{y} = \vec{y} - \vec{y}_b
innovation = (observations - projected_obs)
# B_{proj} = HBH^T
if isinstance(observation_operator, LinearOperator):
B_HT = tolinearoperator(background_covariance).dot(
observation_operator.T)
projected_background_covariance = ProductLinearOperator(
observation_operator, B_HT)
else:
B_HT = background_covariance.dot(observation_operator.T)
projected_background_covariance = observation_operator.dot(
B_HT)
if ((isinstance(projected_background_covariance, LinearOperator) ^
isinstance(observation_covariance, LinearOperator))):
covariance_sum = (tolinearoperator(projected_background_covariance) +
tolinearoperator(observation_covariance))
else:
covariance_sum = (projected_background_covariance +
observation_covariance)
# \Delta\vec{x} = B H^T (B_{proj} + R)^{-1} \Delta\vec{y}
# This does repeat work for in memory data, but is perhaps doable
# for out-of-core computations
observation_increment = solve(
covariance_sum, innovation)
analysis_increment = background_covariance.dot(
observation_operator.T.dot(
observation_increment))
# \vec{x}_a = \vec{x}_b + \Delta\vec{x}
analysis = background + analysis_increment
# P_a = B - B H^T (B_{proj} + R)^{-1} H B
if reduced_background_covariance is None:
decrease = B_HT.dot(solve(
covariance_sum,
B_HT.T))
if isinstance(decrease, LinearOperator):
background_covariance = tolinearoperator(
background_covariance)
analysis_covariance = background_covariance - decrease
else:
# The possibility that the arguments may be a mix of
# LinearOperators and arrays requires an odd evaluation order
# This is symmetric
# Calculate B H^T cov_sum^-1 H
most_of_decrease = reduced_background_covariance.dot(
reduced_observation_operator.T.dot(
solve(
covariance_sum,
reduced_observation_operator
)
)
)
# Transpose to get H^T cov_sum^-1 H B
# Then B * _ to get the decrease
decrease = reduced_background_covariance.dot(most_of_decrease.T)
if isinstance(reduced_background_covariance, LinearOperator):
decrease = tolinearoperator(decrease)
analysis_covariance = reduced_background_covariance - decrease
return analysis, analysis_covariance
@method_common
def save_sum(background, background_covariance,
observations, observation_covariance,
observation_operator,
reduced_background_covariance=None,
reduced_observation_operator=None):
"""Solve the inversion problem, evaluating sub-expressions only once.
Assumes all arrays fit in memory with room to spare.
Assumes everything follows a multivariate normal distribution
with the specified covariance matrices. Under this assumption
`analysis_covariance` is exact, and `analysis` is the Maximum
Likelihood Estimator and the Best Linear Unbiased Estimator
for the underlying state in the frequentist framework, and
specify the posterior distribution for the state in the
Bayesian framework. If these are not satisfied, these still
form the Generalized Least Squares estimates for the state and
an estimated uncertainty.
Parameters
----------
background: array_like[N]
The background state estimate.
background_covariance: array_like[N, N]
Covariance of background state estimate across
realizations/ensemble members. "Ensemble" is here
interpreted in the sense used in statistical mechanics or
frequentist statistics, and may not be derived from a
sample as in meteorological ensemble Kalman filters
observations: array_like[M]
The observations constraining the background estimate.
observation_covariance: array_like[M, M]
Covariance of observations across realizations/ensemble
members. "Ensemble" again has the statistical meaning.
observation_operator: array_like[M, N]
The relationship between the state and the observations.
reduced_background_covariance: array_like[Nred, Nred], optional
The covariance for a smaller state space, usually obtained by
reducing resolution in space and time. Note that
`reduced_observation_operator` must also be provided
reduced_observation_operator: array_like[M, Nred], optional
The relationship between the reduced state space and the
observations. Note that `reduced_background_covariance`
must also be provided.
Returns
-------
analysis: array_like[N]
Analysis state estimate
analysis_covariance: array_like[Nred, Nred] or array_like[N, N]
Estimated uncertainty of analysis across
realizations/ensemble members. Calculated using
reduced_background_covariance and
reduced_observation_operator if possible
"""
# \vec{y}_b = H \vec{x}_b
projected_obs = observation_operator.dot(background)
# \Delta\vec{y} = \vec{y} - \vec{y}_b
innovation = (observations - projected_obs)
# B_{proj} = HBH^T
if isinstance(observation_operator, ARRAY_TYPES):
# TODO: test this
if hasattr(background_covariance, "quadratic_form"):
projected_background_covariance = (
background_covariance.quadratic_form(
observation_operator.T))
else:
projected_background_covariance = observation_operator.dot(
background_covariance.dot(observation_operator.T))
else:
projected_background_covariance = ProductLinearOperator(
observation_operator, tolinearoperator(background_covariance),
observation_operator.T)
if ((isinstance(projected_background_covariance, LinearOperator) ^
isinstance(observation_covariance, LinearOperator))):
covariance_sum = (tolinearoperator(projected_background_covariance) +
tolinearoperator(observation_covariance))
else:
covariance_sum = (projected_background_covariance +
observation_covariance)
# \Delta\vec{x} = B H^T (B_{proj} + R)^{-1} \Delta\vec{y}
observation_increment = solve(covariance_sum, innovation)
analysis_increment = background_covariance.dot(
observation_operator.T.dot(
observation_increment))
# \vec{x}_a = \vec{x}_b + \Delta\vec{x}
analysis = background + analysis_increment
# P_a = B - B H^T (B_{proj} + R)^{-1} H B
if reduced_background_covariance is None:
if isinstance(observation_operator, ARRAY_TYPES):
B_HT = background_covariance.dot(observation_operator.T)
else:
B_HT = ProductLinearOperator(
tolinearoperator(background_covariance),
observation_operator.T
)
decrease = B_HT.dot(solve(
covariance_sum,
B_HT.T))
if isinstance(decrease, LinearOperator):
background_covariance = tolinearoperator(
background_covariance)
analysis_covariance = background_covariance - decrease
else:
# The possibility that the arguments may be a mix of
# LinearOperators and arrays requires an odd evaluation order
# This is symmetric
# Calculate B H^T cov_sum^-1 H
most_of_decrease = reduced_background_covariance.dot(
reduced_observation_operator.T.dot(
solve(
covariance_sum,
reduced_observation_operator
)
)
)
# Transpose to get H^T cov_sum^-1 H B
# Then B * _ to get the decrease
decrease = reduced_background_covariance.dot(most_of_decrease.T)
if isinstance(reduced_background_covariance, LinearOperator):
decrease = tolinearoperator(decrease)
analysis_covariance = reduced_background_covariance - decrease
return analysis, analysis_covariance
@method_common
def scipy_chol(background, background_covariance,
observations, observation_covariance,
observation_operator,
reduced_background_covariance=None,
reduced_observation_operator=None):
"""Use the Cholesky decomposition to solve the inverison problem.
Assumes all arrays fit in memory with room to spare.
Uses cholesky decomposition for solving a matrix equation
rather than using matrix inverses.
Assumes everything follows a multivariate normal distribution
with the specified covariance matrices. Under this assumption
`analysis_covariance` is exact, and `analysis` is the Maximum
Likelihood Estimator and the Best Linear Unbiased Estimator
for the underlying state in the frequentist framework, and
specify the posterior distribution for the state in the
Bayesian framework. If these are not satisfied, these still
form the Generalized Least Squares estimates for the state and
an estimated uncertainty.
Parameters
----------
background: array_like[N]
The background state estimate.
background_covariance: array_like[N, N]
Covariance of background state estimate across
realizations/ensemble members. "Ensemble" is here
interpreted in the sense used in statistical mechanics or
frequentist statistics, and may not be derived from a
sample as in meteorological ensemble Kalman filters
observations: array_like[M]
The observations constraining the background estimate.
observation_covariance: array_like[M, M]
Covariance of observations across realizations/ensemble
members. "Ensemble" again has the statistical meaning.
observation_operator: array_like[M, N]
The relationship between the state and the observations.
reduced_background_covariance: array_like[Nred, Nred], optional
The covariance for a smaller state space, usually obtained by
reducing resolution in space and time. Note that
`reduced_observation_operator` must also be provided
reduced_observation_operator: array_like[M, Nred], optional
The relationship between the reduced state space and the
observations. Note that `reduced_background_covariance`
must also be provided.
Returns
-------
analysis: array_like[N]
Analysis state estimate
analysis_covariance: array_like[Nred, Nred] or array_like[N, N]
Estimated uncertainty of analysis across
realizations/ensemble members. Calculated using
reduced_background_covariance and
reduced_observation_operator if possible
"""
# \vec{y}_b = H \vec{x}_b
projected_obs = observation_operator.dot(background)
# \Delta\vec{y} = \vec{y} - \vec{y}_b
innovation = observations - projected_obs
B_HT = background_covariance.dot(observation_operator.T)
# B_{proj} = HBH^T
projected_background_covariance = observation_operator.dot(
B_HT)
if isinstance(observation_covariance, LinearOperator):
projected_background_covariance = tolinearoperator(
projected_background_covariance)
covariance_sum = projected_background_covariance + observation_covariance
cov_sum_chol_up = scipy.linalg.cho_factor(covariance_sum, overwrite_a=True)
del covariance_sum
# \Delta\vec{x} = B H^T (B_{proj} + R)^{-1} \Delta\vec{y}
analysis_increment = B_HT.dot(
scipy.linalg.cho_solve(
cov_sum_chol_up,
innovation,
overwrite_b=True))
del innovation
# \vec{x}_a = \vec{x}_b + \Delta\vec{x}
analysis = background + analysis_increment
# P_a = B - B H^T (B_{proj} + R)^{-1} H B
if reduced_background_covariance is None:
decrease = B_HT.dot(
scipy.linalg.cho_solve(
cov_sum_chol_up,
B_HT.T,
overwrite_b=False))
if isinstance(background_covariance, LinearOperator):
decrease = tolinearoperator(decrease)
analysis_covariance = background_covariance - decrease
else:
B_HT_red = reduced_background_covariance.dot(
reduced_observation_operator.T)
decrease = B_HT_red.dot(
scipy.linalg.cho_solve(
cov_sum_chol_up,
B_HT_red.T,
overwrite_b=False))
if isinstance(reduced_background_covariance, LinearOperator):
decrease = tolinearoperator(decrease)
analysis_covariance = reduced_background_covariance - decrease
return analysis, analysis_covariance
| 41.642166
| 79
| 0.688745
| 2,409
| 21,529
| 5.961395
| 0.091739
| 0.123947
| 0.065803
| 0.027575
| 0.935102
| 0.933709
| 0.90899
| 0.906413
| 0.895899
| 0.885314
| 0
| 0.000995
| 0.253008
| 21,529
| 516
| 80
| 41.722868
| 0.891991
| 0.497561
| 0
| 0.744186
| 0
| 0
| 0.001399
| 0
| 0
| 0
| 0
| 0.001938
| 0
| 1
| 0.018605
| false
| 0
| 0.018605
| 0
| 0.055814
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
69bebd68847e37c845983985046015641173a4f6
| 30,615
|
py
|
Python
|
sdk/python/pulumi_azure/desktopvirtualization/scaling_plan.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/desktopvirtualization/scaling_plan.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/desktopvirtualization/scaling_plan.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ScalingPlanArgs', 'ScalingPlan']
@pulumi.input_type
class ScalingPlanArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
schedules: pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]],
time_zone: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
exclusion_tag: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_pools: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ScalingPlan resource.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]] schedules: One or more `schedule` blocks as defined below.
:param pulumi.Input[str] time_zone: Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
:param pulumi.Input[str] description: A description of the Scaling Plan.
:param pulumi.Input[str] exclusion_tag: The name of the tag associated with the VMs you want to exclude from autoscaling.
:param pulumi.Input[str] friendly_name: Friendly name of the Scaling Plan.
:param pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]] host_pools: One or more `host_pool` blocks as defined below.
:param pulumi.Input[str] location: The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[str] name: The name which should be used for this Virtual Desktop Scaling Plan . Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan .
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "schedules", schedules)
pulumi.set(__self__, "time_zone", time_zone)
if description is not None:
pulumi.set(__self__, "description", description)
if exclusion_tag is not None:
pulumi.set(__self__, "exclusion_tag", exclusion_tag)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if host_pools is not None:
pulumi.set(__self__, "host_pools", host_pools)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def schedules(self) -> pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]]:
"""
One or more `schedule` blocks as defined below.
"""
return pulumi.get(self, "schedules")
@schedules.setter
def schedules(self, value: pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]]):
pulumi.set(self, "schedules", value)
@property
@pulumi.getter(name="timeZone")
def time_zone(self) -> pulumi.Input[str]:
"""
Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
"""
return pulumi.get(self, "time_zone")
@time_zone.setter
def time_zone(self, value: pulumi.Input[str]):
pulumi.set(self, "time_zone", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of the Scaling Plan.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="exclusionTag")
def exclusion_tag(self) -> Optional[pulumi.Input[str]]:
"""
The name of the tag associated with the VMs you want to exclude from autoscaling.
"""
return pulumi.get(self, "exclusion_tag")
@exclusion_tag.setter
def exclusion_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "exclusion_tag", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of the Scaling Plan.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter(name="hostPools")
def host_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]]:
"""
One or more `host_pool` blocks as defined below.
"""
return pulumi.get(self, "host_pools")
@host_pools.setter
def host_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]]):
pulumi.set(self, "host_pools", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Virtual Desktop Scaling Plan . Changing this forces a new Virtual Desktop Scaling Plan to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan .
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _ScalingPlanState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
exclusion_tag: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_pools: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
schedules: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
time_zone: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ScalingPlan resources.
:param pulumi.Input[str] description: A description of the Scaling Plan.
:param pulumi.Input[str] exclusion_tag: The name of the tag associated with the VMs you want to exclude from autoscaling.
:param pulumi.Input[str] friendly_name: Friendly name of the Scaling Plan.
:param pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]] host_pools: One or more `host_pool` blocks as defined below.
:param pulumi.Input[str] location: The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[str] name: The name which should be used for this Virtual Desktop Scaling Plan . Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]] schedules: One or more `schedule` blocks as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan .
:param pulumi.Input[str] time_zone: Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
"""
if description is not None:
pulumi.set(__self__, "description", description)
if exclusion_tag is not None:
pulumi.set(__self__, "exclusion_tag", exclusion_tag)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if host_pools is not None:
pulumi.set(__self__, "host_pools", host_pools)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if schedules is not None:
pulumi.set(__self__, "schedules", schedules)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if time_zone is not None:
pulumi.set(__self__, "time_zone", time_zone)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of the Scaling Plan.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="exclusionTag")
def exclusion_tag(self) -> Optional[pulumi.Input[str]]:
"""
The name of the tag associated with the VMs you want to exclude from autoscaling.
"""
return pulumi.get(self, "exclusion_tag")
@exclusion_tag.setter
def exclusion_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "exclusion_tag", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of the Scaling Plan.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter(name="hostPools")
def host_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]]:
"""
One or more `host_pool` blocks as defined below.
"""
return pulumi.get(self, "host_pools")
@host_pools.setter
def host_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]]):
pulumi.set(self, "host_pools", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Virtual Desktop Scaling Plan . Changing this forces a new Virtual Desktop Scaling Plan to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def schedules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]]]:
"""
One or more `schedule` blocks as defined below.
"""
return pulumi.get(self, "schedules")
@schedules.setter
def schedules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]]]):
pulumi.set(self, "schedules", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan .
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="timeZone")
def time_zone(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
"""
return pulumi.get(self, "time_zone")
@time_zone.setter
def time_zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_zone", value)
class ScalingPlan(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
exclusion_tag: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanHostPoolArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanScheduleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
time_zone: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Virtual Desktop Scaling Plan.
## Disclaimers
> **Note** Scaling Plans are currently in preview and are only supported in a limited number of regions. Both the Scaling Plan and any referenced Host Pools must be deployed in a supported region. [Autoscale (preview) for Azure Virtual Desktop host pools](https://docs.microsoft.com/en-us/azure/virtual-desktop/autoscale-scaling-plan).
> **Note** Scaling Plans require specific permissions to be granted to the Windows Virtual Desktop application before a 'host_pool' can be configured. [Required Permissions for Scaling Plans](https://docs.microsoft.com/en-us/azure/virtual-desktop/autoscale-scaling-plan#create-a-custom-rbac-role).
## Import
Virtual Desktop Scaling Plans can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:desktopvirtualization/scalingPlan:ScalingPlan example /subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/scalingPlans/plan1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description of the Scaling Plan.
:param pulumi.Input[str] exclusion_tag: The name of the tag associated with the VMs you want to exclude from autoscaling.
:param pulumi.Input[str] friendly_name: Friendly name of the Scaling Plan.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanHostPoolArgs']]]] host_pools: One or more `host_pool` blocks as defined below.
:param pulumi.Input[str] location: The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[str] name: The name which should be used for this Virtual Desktop Scaling Plan . Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanScheduleArgs']]]] schedules: One or more `schedule` blocks as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan .
:param pulumi.Input[str] time_zone: Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ScalingPlanArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Virtual Desktop Scaling Plan.
## Disclaimers
> **Note** Scaling Plans are currently in preview and are only supported in a limited number of regions. Both the Scaling Plan and any referenced Host Pools must be deployed in a supported region. [Autoscale (preview) for Azure Virtual Desktop host pools](https://docs.microsoft.com/en-us/azure/virtual-desktop/autoscale-scaling-plan).
> **Note** Scaling Plans require specific permissions to be granted to the Windows Virtual Desktop application before a 'host_pool' can be configured. [Required Permissions for Scaling Plans](https://docs.microsoft.com/en-us/azure/virtual-desktop/autoscale-scaling-plan#create-a-custom-rbac-role).
## Import
Virtual Desktop Scaling Plans can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:desktopvirtualization/scalingPlan:ScalingPlan example /subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/scalingPlans/plan1
```
:param str resource_name: The name of the resource.
:param ScalingPlanArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ScalingPlanArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
exclusion_tag: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanHostPoolArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanScheduleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
time_zone: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ScalingPlanArgs.__new__(ScalingPlanArgs)
__props__.__dict__["description"] = description
__props__.__dict__["exclusion_tag"] = exclusion_tag
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["host_pools"] = host_pools
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if schedules is None and not opts.urn:
raise TypeError("Missing required property 'schedules'")
__props__.__dict__["schedules"] = schedules
__props__.__dict__["tags"] = tags
if time_zone is None and not opts.urn:
raise TypeError("Missing required property 'time_zone'")
__props__.__dict__["time_zone"] = time_zone
super(ScalingPlan, __self__).__init__(
'azure:desktopvirtualization/scalingPlan:ScalingPlan',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
exclusion_tag: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanHostPoolArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanScheduleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
time_zone: Optional[pulumi.Input[str]] = None) -> 'ScalingPlan':
"""
Get an existing ScalingPlan resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description of the Scaling Plan.
:param pulumi.Input[str] exclusion_tag: The name of the tag associated with the VMs you want to exclude from autoscaling.
:param pulumi.Input[str] friendly_name: Friendly name of the Scaling Plan.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanHostPoolArgs']]]] host_pools: One or more `host_pool` blocks as defined below.
:param pulumi.Input[str] location: The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[str] name: The name which should be used for this Virtual Desktop Scaling Plan . Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanScheduleArgs']]]] schedules: One or more `schedule` blocks as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan .
:param pulumi.Input[str] time_zone: Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ScalingPlanState.__new__(_ScalingPlanState)
__props__.__dict__["description"] = description
__props__.__dict__["exclusion_tag"] = exclusion_tag
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["host_pools"] = host_pools
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["schedules"] = schedules
__props__.__dict__["tags"] = tags
__props__.__dict__["time_zone"] = time_zone
return ScalingPlan(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description of the Scaling Plan.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="exclusionTag")
def exclusion_tag(self) -> pulumi.Output[Optional[str]]:
"""
The name of the tag associated with the VMs you want to exclude from autoscaling.
"""
return pulumi.get(self, "exclusion_tag")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
Friendly name of the Scaling Plan.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="hostPools")
def host_pools(self) -> pulumi.Output[Optional[Sequence['outputs.ScalingPlanHostPool']]]:
"""
One or more `host_pool` blocks as defined below.
"""
return pulumi.get(self, "host_pools")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name which should be used for this Virtual Desktop Scaling Plan . Changing this forces a new Virtual Desktop Scaling Plan to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def schedules(self) -> pulumi.Output[Sequence['outputs.ScalingPlanSchedule']]:
"""
One or more `schedule` blocks as defined below.
"""
return pulumi.get(self, "schedules")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan .
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="timeZone")
def time_zone(self) -> pulumi.Output[str]:
"""
Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
"""
return pulumi.get(self, "time_zone")
| 50.687086
| 343
| 0.670652
| 3,748
| 30,615
| 5.322305
| 0.063501
| 0.095398
| 0.074393
| 0.062863
| 0.908863
| 0.894375
| 0.880389
| 0.867455
| 0.860086
| 0.836876
| 0
| 0.004694
| 0.227601
| 30,615
| 603
| 344
| 50.771144
| 0.83888
| 0.385661
| 0
| 0.775568
| 1
| 0
| 0.10916
| 0.030894
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161932
| false
| 0.002841
| 0.019886
| 0
| 0.278409
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
69de6401acdb7d4b1720dc51198b2e85f637d702
| 15,720
|
py
|
Python
|
tests/functional/test_push.py
|
AgeOfLearning/uget-cli
|
ecaa54db9c9a6bd22f3ce2099b28828ea42cb853
|
[
"MIT"
] | 1
|
2019-03-03T21:19:51.000Z
|
2019-03-03T21:19:51.000Z
|
tests/functional/test_push.py
|
AgeOfLearning/uget-cli
|
ecaa54db9c9a6bd22f3ce2099b28828ea42cb853
|
[
"MIT"
] | 3
|
2018-12-31T20:11:03.000Z
|
2021-11-15T17:47:57.000Z
|
tests/functional/test_push.py
|
AgeOfLearning/uget-cli
|
ecaa54db9c9a6bd22f3ce2099b28828ea42cb853
|
[
"MIT"
] | 2
|
2019-02-14T01:08:57.000Z
|
2019-03-03T21:19:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functional tests for `ugetcli` package - `push` command.
Tests functionality of the cli push command with various options.
"""
import unittest
import os
import json
from click.testing import CliRunner
from mock import MagicMock, patch
from ugetcli import cli
from ugetcli.utils import create_empty_file
class TestUGetCliPush(unittest.TestCase):
"""Tests for `ugetcli` package - pack command."""
@patch('ugetcli.uget.CsProj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push(
self, nuget_runner_mock, csproj_mock):
"""Test cli: uget pack with default values when path contains .csproj"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.locate_nuget.return_value = "nuget.exe"
nuget_runner_mock.get_normalized_nuget_pack_version.return_value = "1.2.3"
csproj_instance = MagicMock()
csproj_instance.get_assembly_name.return_value = "TestProject"
csproj_instance.get_assembly_version.return_value = "1.2.3"
csproj_mock.return_value = csproj_instance
csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"
runner = CliRunner(env={"NUGET_PATH": None, "NUGET_API_KEY": None})
with runner.isolated_filesystem():
os.mkdir("Output")
create_empty_file("Output/TestProject.1.2.3.nupkg")
result = runner.invoke(cli.ugetcli, ['push'], obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('nuget.exe', False)
nuget_runner_instance.push.assert_called_with(os.path.normpath("Output/TestProject.1.2.3.nupkg"), None, None)
@patch('ugetcli.uget.CsProj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push_path_csproj(
self, nuget_runner_mock, csproj_mock):
"""Test cli: uget pack with default values when path directly points to .csproj"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.locate_nuget.return_value = "nuget.exe"
nuget_runner_mock.get_normalized_nuget_pack_version.return_value = "1.2.3"
csproj_instance = MagicMock()
csproj_instance.get_assembly_name.return_value = "TestProject"
csproj_instance.get_assembly_version.return_value = "1.2.3"
csproj_mock.return_value = csproj_instance
csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"
runner = CliRunner(env={"NUGET_PATH": None, "NUGET_API_KEY": None})
with runner.isolated_filesystem():
os.mkdir("Output")
create_empty_file("Output/TestProject.1.2.3.nupkg")
result = runner.invoke(cli.ugetcli, ['push', '--path', 'TestProject.csproj'], obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('nuget.exe', False)
nuget_runner_instance.push.assert_called_with(os.path.normpath("Output/TestProject.1.2.3.nupkg"), None, None)
csproj_mock.get_csproj_at_path.assert_called_with('TestProject.csproj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push_path_nupkg(
self, nuget_runner_mock):
"""Test cli: uget pack with default values when path points to a .nupkg file"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.locate_nuget.return_value = "nuget.exe"
runner = CliRunner(env={"NUGET_PATH": None, "NUGET_API_KEY": None})
with runner.isolated_filesystem():
create_empty_file("myproject.nupkg")
result = runner.invoke(cli.ugetcli, ['push', '--path', 'myproject.nupkg'], obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('nuget.exe', False)
nuget_runner_instance.push.assert_called_with(os.path.normpath("myproject.nupkg"), None, None)
@patch('ugetcli.uget.CsProj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push_path_csproj_with_output_dir(
self, nuget_runner_mock, csproj_mock):
"""Test cli: uget pack with default values when path directly points to .csproj and --output-dir is set"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.locate_nuget.return_value = "nuget.exe"
nuget_runner_mock.get_normalized_nuget_pack_version.return_value = "1.2.3"
csproj_instance = MagicMock()
csproj_instance.get_assembly_name.return_value = "TestProject"
csproj_instance.get_assembly_version.return_value = "1.2.3"
csproj_mock.return_value = csproj_instance
csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"
runner = CliRunner(env={"NUGET_PATH": None, "NUGET_API_KEY": None})
with runner.isolated_filesystem():
os.makedirs("MyOutput")
create_empty_file("MyOutput/TestProject.1.2.3.nupkg")
result = runner.invoke(cli.ugetcli, ['push', '--path', 'TestProject.csproj', '--output-dir', 'MyOutput'],
obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('nuget.exe', False)
nuget_runner_instance.push.assert_called_with(os.path.normpath("MyOutput/TestProject.1.2.3.nupkg"), None, None)
csproj_mock.get_csproj_at_path.assert_called_with('TestProject.csproj')
@patch('ugetcli.uget.CsProj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push_with_feed(
self, nuget_runner_mock, csproj_mock):
"""Test cli: uget pack with --feed"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.locate_nuget.return_value = "nuget.exe"
nuget_runner_mock.get_normalized_nuget_pack_version.return_value = "1.2.3"
csproj_instance = MagicMock()
csproj_instance.get_assembly_name.return_value = "TestProject"
csproj_instance.get_assembly_version.return_value = "1.2.3"
csproj_mock.return_value = csproj_instance
csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"
runner = CliRunner(env={"NUGET_PATH": None, "NUGET_API_KEY": None})
with runner.isolated_filesystem():
os.mkdir("Output")
create_empty_file("Output/TestProject.1.2.3.nupkg")
result = runner.invoke(cli.ugetcli, ['push', '--feed', 'http://test.com/feed'], obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('nuget.exe', False)
nuget_runner_instance.push.assert_called_with(
os.path.normpath("Output/TestProject.1.2.3.nupkg"), 'http://test.com/feed', None)
@patch('ugetcli.uget.CsProj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push_with_nuget_path(
self, nuget_runner_mock, csproj_mock):
"""Test cli: uget pack with --nuget-path"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.locate_nuget.return_value = "custom_nuget.exe"
nuget_runner_mock.get_normalized_nuget_pack_version.return_value = "1.2.3"
csproj_instance = MagicMock()
csproj_instance.get_assembly_name.return_value = "TestProject"
csproj_instance.get_assembly_version.return_value = "1.2.3"
csproj_mock.return_value = csproj_instance
csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"
runner = CliRunner(env={"NUGET_PATH": None, "NUGET_API_KEY": None})
with runner.isolated_filesystem():
os.mkdir("Output")
create_empty_file("Output/TestProject.1.2.3.nupkg")
result = runner.invoke(cli.ugetcli, ['push', '--nuget-path', 'custom_nuget.exe'], obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('custom_nuget.exe', False)
nuget_runner_mock.locat_nuget.assert_called_with("custom_nuget.exe")
nuget_runner_instance.push.assert_called_with(
os.path.normpath("Output/TestProject.1.2.3.nupkg"), None, None)
@patch('ugetcli.uget.CsProj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push_with_nuget_path(
self, nuget_runner_mock, csproj_mock):
"""Test cli: uget pack with NUGET_PATH env variable set"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.valid_nuget_executable.return_value = True
nuget_runner_mock.get_normalized_nuget_pack_version.return_value = "1.2.3"
csproj_instance = MagicMock()
csproj_instance.get_assembly_name.return_value = "TestProject"
csproj_instance.get_assembly_version.return_value = "1.2.3"
csproj_mock.return_value = csproj_instance
csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"
runner = CliRunner(env={"NUGET_PATH": "custom_nuget.exe", "NUGET_API_KEY": None})
with runner.isolated_filesystem():
os.mkdir("Output")
create_empty_file("Output/TestProject.1.2.3.nupkg")
result = runner.invoke(cli.ugetcli, ['push'], obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('custom_nuget.exe', False)
nuget_runner_mock.valid_nuget_executable.assert_called_with("custom_nuget.exe")
nuget_runner_instance.push.assert_called_with(
os.path.normpath("Output/TestProject.1.2.3.nupkg"), None, None)
@patch('ugetcli.uget.CsProj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push_with_api_key(
self, nuget_runner_mock, csproj_mock):
"""Test cli: uget pack with --api-key"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.locate_nuget.return_value = "nuget.exe"
nuget_runner_mock.get_normalized_nuget_pack_version.return_value = "1.2.3"
csproj_instance = MagicMock()
csproj_instance.get_assembly_name.return_value = "TestProject"
csproj_instance.get_assembly_version.return_value = "1.2.3"
csproj_mock.return_value = csproj_instance
csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"
runner = CliRunner(env={"NUGET_PATH": None, "NUGET_API_KEY": None})
with runner.isolated_filesystem():
os.mkdir("Output")
create_empty_file("Output/TestProject.1.2.3.nupkg")
result = runner.invoke(cli.ugetcli, ['push', '--api-key', 'myapikey'], obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('nuget.exe', False)
nuget_runner_instance.push.assert_called_with(
os.path.normpath("Output/TestProject.1.2.3.nupkg"), None, "myapikey")
@patch('ugetcli.uget.CsProj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push_with_api_key_env(
self, nuget_runner_mock, csproj_mock):
"""Test cli: uget pack with API_KEY env variable"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.locate_nuget.return_value = "nuget.exe"
nuget_runner_mock.get_normalized_nuget_pack_version.return_value = "1.2.3"
csproj_instance = MagicMock()
csproj_instance.get_assembly_name.return_value = "TestProject"
csproj_instance.get_assembly_version.return_value = "1.2.3"
csproj_mock.return_value = csproj_instance
csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"
runner = CliRunner(env={"NUGET_PATH": None, "NUGET_API_KEY": "myapikey"})
with runner.isolated_filesystem():
os.mkdir("Output")
create_empty_file("Output/TestProject.1.2.3.nupkg")
result = runner.invoke(cli.ugetcli, ['push'], obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('nuget.exe', False)
nuget_runner_instance.push.assert_called_with(
os.path.normpath("Output/TestProject.1.2.3.nupkg"), None, "myapikey")
@patch('ugetcli.uget.CsProj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push_with_config_json(
self, nuget_runner_mock, csproj_mock):
"""Test cli: uget pack with config json"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.locate_nuget.return_value = "nuget.exe"
nuget_runner_mock.get_normalized_nuget_pack_version.return_value = "1.2.3"
csproj_instance = MagicMock()
csproj_instance.get_assembly_name.return_value = "TestProject"
csproj_instance.get_assembly_version.return_value = "1.2.3"
csproj_mock.return_value = csproj_instance
csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"
config_data = {
"output_dir": "CustomOutput",
"feed": "http://test.com/nuget",
"nuget_path": "custom_nuget.exe",
"api_key": "myapikey123"
}
runner = CliRunner(env={"NUGET_PATH": None, "NUGET_API_KEY": None})
with runner.isolated_filesystem():
os.makedirs("CustomOutput")
create_empty_file("CustomOutput/TestProject.1.2.3.nupkg")
result = runner.invoke(cli.ugetcli, ['push', '--config', json.dumps(config_data)], obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('custom_nuget.exe', False)
nuget_runner_instance.push.assert_called_with(
os.path.normpath("CustomOutput/TestProject.1.2.3.nupkg"), "http://test.com/nuget", "myapikey123")
@patch('ugetcli.uget.CsProj')
@patch('ugetcli.uget.NuGetRunner')
def test_cli_uget_push_with_config_file(
self, nuget_runner_mock, csproj_mock):
"""Test cli: uget pack with config file"""
nuget_runner_instance = MagicMock()
nuget_runner_mock.return_value = nuget_runner_instance
nuget_runner_mock.locate_nuget.return_value = "nuget.exe"
nuget_runner_mock.get_normalized_nuget_pack_version.return_value = "1.2.3"
csproj_instance = MagicMock()
csproj_instance.get_assembly_name.return_value = "TestProject"
csproj_instance.get_assembly_version.return_value = "1.2.3"
csproj_mock.return_value = csproj_instance
csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"
config_data = {
"output_dir": "CustomOutput",
"feed": "http://test.com/nuget",
"nuget_path": "custom_nuget.exe",
"api_key": "myapikey123"
}
runner = CliRunner(env={"NUGET_PATH": None, "NUGET_API_KEY": None})
with runner.isolated_filesystem():
with open('config_test.json', 'w') as f:
json.dump(config_data, f)
os.makedirs("CustomOutput")
create_empty_file("CustomOutput/TestProject.1.2.3.nupkg")
result = runner.invoke(cli.ugetcli, ['push', '--config-path', 'config_test.json'], obj={})
assert result.exit_code == 0, result
nuget_runner_mock.assert_called_with('custom_nuget.exe', False)
nuget_runner_instance.push.assert_called_with(
os.path.normpath("CustomOutput/TestProject.1.2.3.nupkg"), "http://test.com/nuget", "myapikey123")
| 47.781155
| 119
| 0.690967
| 1,994
| 15,720
| 5.110331
| 0.058676
| 0.096075
| 0.082434
| 0.037291
| 0.931992
| 0.927772
| 0.922277
| 0.922277
| 0.917174
| 0.917174
| 0
| 0.011399
| 0.196374
| 15,720
| 328
| 120
| 47.926829
| 0.795219
| 0.051272
| 0
| 0.832685
| 0
| 0
| 0.180893
| 0.060096
| 0
| 0
| 0
| 0
| 0.143969
| 1
| 0.042802
| false
| 0
| 0.027237
| 0
| 0.07393
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.