hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfd6da096b94ef9904be8bcf945fbc761f09828
| 6,657
|
py
|
Python
|
clients/client/python/ory_client/model/ui_node_image_attributes.py
|
simoneromano96/sdk
|
a6113d0daefbbb803790297e4b242d4c7cbbcb22
|
[
"Apache-2.0"
] | null | null | null |
clients/client/python/ory_client/model/ui_node_image_attributes.py
|
simoneromano96/sdk
|
a6113d0daefbbb803790297e4b242d4c7cbbcb22
|
[
"Apache-2.0"
] | null | null | null |
clients/client/python/ory_client/model/ui_node_image_attributes.py
|
simoneromano96/sdk
|
a6113d0daefbbb803790297e4b242d4c7cbbcb22
|
[
"Apache-2.0"
] | null | null | null |
"""
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.9
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class UiNodeImageAttributes(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes on this model.
    allowed_values = {
    }

    # No length/range/regex validations on this model.
    validations = {
    }

    # None here means undeclared properties are handled per configuration
    # (see discard_unknown_keys check in __init__ below).
    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'src': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model is not part of a polymorphic (discriminated) hierarchy.
        return None

    # Maps pythonic attribute names to JSON keys (identical for this model).
    attribute_map = {
        'src': 'src',  # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes assigned directly in __init__;
    # these are not OpenAPI model properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, src, *args, **kwargs):  # noqa: E501
        """UiNodeImageAttributes - a model defined in OpenAPI

        Args:
            src (str): The image's source URL. format: uri

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # Pop framework-internal keyword arguments before treating the
        # remaining kwargs as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Generated models accept keyword arguments only (besides `src`).
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.src = src
        # Remaining kwargs are treated as (possibly undeclared) properties.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 38.929825
| 194
| 0.585849
|
acfd6df649ede5f992cae534018c1d49e0a855b8
| 17,019
|
py
|
Python
|
research/object_detection/anchor_generators/multiple_grid_anchor_generator.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | 1
|
2019-09-11T09:41:11.000Z
|
2019-09-11T09:41:11.000Z
|
research/object_detection/anchor_generators/multiple_grid_anchor_generator.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/anchor_generators/multiple_grid_anchor_generator.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates grid anchors on the fly corresponding to multiple CNN layers.
Generates grid anchors on the fly corresponding to multiple CNN layers as
described in:
"SSD: Single Shot MultiBox Detector"
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
Cheng-Yang Fu, Alexander C. Berg
(see Section 2.2: Choosing scales and aspect ratios for default boxes)
"""
import numpy as np
import tensorflow as tf
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.core import anchor_generator
from object_detection.core import box_list_ops
class MultipleGridAnchorGenerator(anchor_generator.AnchorGenerator):
  """Generate a grid of anchors for multiple CNN layers."""

  def __init__(self,
               box_specs_list,
               base_anchor_size=None,
               anchor_strides=None,
               anchor_offsets=None,
               clip_window=None):
    """Constructs a MultipleGridAnchorGenerator.

    To construct anchors, at multiple grid resolutions, one must provide a
    list of feature_map_shape_list (e.g., [(8, 8), (4, 4)]), and for each grid
    size, a corresponding list of (scale, aspect ratio) box specifications.

    For example:
    box_specs_list = [[(.1, 1.0), (.1, 2.0)],  # for 8x8 grid
                      [(.2, 1.0), (.3, 1.0), (.2, 2.0)]]  # for 4x4 grid

    To support the fully convolutional setting, we pass grid sizes in at
    generation time, while scale and aspect ratios are fixed at construction
    time.

    Args:
      box_specs_list: list of list of (scale, aspect ratio) pairs with the
        outside list having the same number of entries as feature_map_shape_list
        (which is passed in at generation time).
      base_anchor_size: base anchor size as [height, width]
        (length-2 float numpy or Tensor, default=[1.0, 1.0]).
        The height and width values are normalized to the
        minimum dimension of the input height and width, so that
        when the base anchor height equals the base anchor
        width, the resulting anchor is square even if the input
        image is not square.
      anchor_strides: list of pairs of strides in pixels (in y and x directions
        respectively). For example, setting anchor_strides=[(25, 25), (50, 50)]
        means that we want the anchors corresponding to the first layer to be
        strided by 25 pixels and those in the second layer to be strided by 50
        pixels in both y and x directions. If anchor_strides=None, they are set
        to be the reciprocal of the corresponding feature map shapes.
      anchor_offsets: list of pairs of offsets in pixels (in y and x directions
        respectively). The offset specifies where we want the center of the
        (0, 0)-th anchor to lie for each layer. For example, setting
        anchor_offsets=[(10, 10), (20, 20)]) means that we want the
        (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space
        and likewise that we want the (0, 0)-th anchor of the second layer to
        lie at (25, 25) in pixel space. If anchor_offsets=None, then they are
        set to be half of the corresponding anchor stride.
      clip_window: a tensor of shape [4] specifying a window to which all
        anchors should be clipped. If clip_window is None, then no clipping
        is performed.

    Raises:
      ValueError: if box_specs_list is not a list of list of pairs
      ValueError: if clip_window is not either None or a tensor of shape [4]
    """
    if isinstance(box_specs_list, list) and all(
        [isinstance(list_item, list) for list_item in box_specs_list]):
      self._box_specs = box_specs_list
    else:
      raise ValueError('box_specs_list is expected to be a '
                       'list of lists of pairs')
    if base_anchor_size is None:
      base_anchor_size = [256, 256]
    self._base_anchor_size = base_anchor_size
    self._anchor_strides = anchor_strides
    self._anchor_offsets = anchor_offsets
    if clip_window is not None and clip_window.get_shape().as_list() != [4]:
      raise ValueError('clip_window must either be None or a shape [4] tensor')
    self._clip_window = clip_window
    # Split each (scale, aspect_ratio) spec into parallel per-layer tuples.
    self._scales = []
    self._aspect_ratios = []
    for box_spec in self._box_specs:
      if not all([isinstance(entry, tuple) and len(entry) == 2
                  for entry in box_spec]):
        raise ValueError('box_specs_list is expected to be a '
                         'list of lists of pairs')
      scales, aspect_ratios = zip(*box_spec)
      self._scales.append(scales)
      self._aspect_ratios.append(aspect_ratios)

    # Validate strides/offsets eagerly so misconfiguration fails at
    # construction time rather than at generation time.
    for arg, arg_name in zip([self._anchor_strides, self._anchor_offsets],
                             ['anchor_strides', 'anchor_offsets']):
      if arg and not (isinstance(arg, list) and
                      len(arg) == len(self._box_specs)):
        raise ValueError('%s must be a list with the same length '
                         'as self._box_specs' % arg_name)
      if arg and not all([
          isinstance(list_item, tuple) and len(list_item) == 2
          for list_item in arg
      ]):
        raise ValueError('%s must be a list of pairs.' % arg_name)

  def name_scope(self):
    return 'MultipleGridAnchorGenerator'

  def num_anchors_per_location(self):
    """Returns the number of anchors per spatial location.

    Returns:
      a list of integers, one for each expected feature map to be passed to
      the Generate function.
    """
    return [len(box_specs) for box_specs in self._box_specs]

  def _generate(self, feature_map_shape_list, im_height=1, im_width=1):
    """Generates a collection of bounding boxes to be used as anchors.

    The number of anchors generated for a single grid with shape MxM where we
    place k boxes over each grid center is k*M^2 and thus the total number of
    anchors is the sum over all grids. In our box_specs_list example
    (see the constructor docstring), we would place two boxes over each grid
    point on an 8x8 grid and three boxes over each grid point on a 4x4 grid and
    thus end up with 2*8^2 + 3*4^2 = 176 anchors in total. The layout of the
    output anchors follows the order of how the grid sizes and box_specs are
    specified (with box_spec index varying the fastest, followed by width
    index, then height index, then grid index).

    Args:
      feature_map_shape_list: list of pairs of convnet layer resolutions in the
        format [(height_0, width_0), (height_1, width_1), ...]. For example,
        setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that
        correspond to an 8x8 layer followed by a 7x7 layer.
      im_height: the height of the image to generate the grid for. If both
        im_height and im_width are 1, the generated anchors default to
        absolute coordinates, otherwise normalized coordinates are produced.
      im_width: the width of the image to generate the grid for. If both
        im_height and im_width are 1, the generated anchors default to
        absolute coordinates, otherwise normalized coordinates are produced.

    Returns:
      boxes_list: a list of BoxLists each holding anchor boxes corresponding to
        the input feature map shapes.

    Raises:
      ValueError: if feature_map_shape_list, box_specs_list do not have the same
        length.
      ValueError: if feature_map_shape_list does not consist of pairs of
        integers
    """
    if not (isinstance(feature_map_shape_list, list)
            and len(feature_map_shape_list) == len(self._box_specs)):
      raise ValueError('feature_map_shape_list must be a list with the same '
                       'length as self._box_specs')
    if not all([isinstance(list_item, tuple) and len(list_item) == 2
                for list_item in feature_map_shape_list]):
      raise ValueError('feature_map_shape_list must be a list of pairs.')
    im_height = tf.cast(im_height, dtype=tf.float32)
    im_width = tf.cast(im_width, dtype=tf.float32)
    # Default strides: the reciprocal of the feature map shape (anchors
    # evenly tile the normalized image).
    if not self._anchor_strides:
      anchor_strides = [(1.0 / tf.cast(pair[0], dtype=tf.float32),
                         1.0 / tf.cast(pair[1], dtype=tf.float32))
                        for pair in feature_map_shape_list]
    else:
      anchor_strides = [(tf.cast(stride[0], dtype=tf.float32) / im_height,
                         tf.cast(stride[1], dtype=tf.float32) / im_width)
                        for stride in self._anchor_strides]
    # Default offsets: half a stride, i.e. anchors centered on grid cells.
    if not self._anchor_offsets:
      anchor_offsets = [(0.5 * stride[0], 0.5 * stride[1])
                        for stride in anchor_strides]
    else:
      anchor_offsets = [(tf.cast(offset[0], dtype=tf.float32) / im_height,
                         tf.cast(offset[1], dtype=tf.float32) / im_width)
                        for offset in self._anchor_offsets]

    for arg, arg_name in zip([anchor_strides, anchor_offsets],
                             ['anchor_strides', 'anchor_offsets']):
      if not (isinstance(arg, list) and len(arg) == len(self._box_specs)):
        raise ValueError('%s must be a list with the same length '
                         'as self._box_specs' % arg_name)
      if not all([isinstance(list_item, tuple) and len(list_item) == 2
                  for list_item in arg]):
        raise ValueError('%s must be a list of pairs.' % arg_name)

    anchor_grid_list = []
    min_im_shape = tf.minimum(im_height, im_width)
    scale_height = min_im_shape / im_height
    scale_width = min_im_shape / im_width
    # tf.is_tensor replaces tf.contrib.framework.is_tensor: tf.contrib was
    # removed in TF 2.x and tf.is_tensor has been available since TF 1.14
    # with the same semantics.
    if not tf.is_tensor(self._base_anchor_size):
      base_anchor_size = [
          scale_height * tf.constant(self._base_anchor_size[0],
                                     dtype=tf.float32),
          scale_width * tf.constant(self._base_anchor_size[1],
                                    dtype=tf.float32)
      ]
    else:
      base_anchor_size = [
          scale_height * self._base_anchor_size[0],
          scale_width * self._base_anchor_size[1]
      ]
    for feature_map_index, (grid_size, scales, aspect_ratios, stride,
                            offset) in enumerate(
                                zip(feature_map_shape_list, self._scales,
                                    self._aspect_ratios, anchor_strides,
                                    anchor_offsets)):
      tiled_anchors = grid_anchor_generator.tile_anchors(
          grid_height=grid_size[0],
          grid_width=grid_size[1],
          scales=scales,
          aspect_ratios=aspect_ratios,
          base_anchor_size=base_anchor_size,
          anchor_stride=stride,
          anchor_offset=offset)
      if self._clip_window is not None:
        tiled_anchors = box_list_ops.clip_to_window(
            tiled_anchors, self._clip_window, filter_nonoverlapping=False)
      num_anchors_in_layer = tiled_anchors.num_boxes_static()
      if num_anchors_in_layer is None:
        num_anchors_in_layer = tiled_anchors.num_boxes()
      # Tag every anchor with the index of the feature map it came from so
      # downstream consumers can recover the per-layer grouping.
      anchor_indices = feature_map_index * tf.ones([num_anchors_in_layer])
      tiled_anchors.add_field('feature_map_index', anchor_indices)
      anchor_grid_list.append(tiled_anchors)

    return anchor_grid_list
def create_ssd_anchors(num_layers=6,
                       min_scale=0.2,
                       max_scale=0.95,
                       scales=None,
                       aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3),
                       interpolated_scale_aspect_ratio=1.0,
                       base_anchor_size=None,
                       anchor_strides=None,
                       anchor_offsets=None,
                       reduce_boxes_in_lowest_layer=True):
  """Creates MultipleGridAnchorGenerator for SSD anchors.

  This function instantiates a MultipleGridAnchorGenerator that reproduces
  ``default box`` construction proposed by Liu et al in the SSD paper.
  See Section 2.2 for details. Grid sizes are assumed to be passed in
  at generation time from finest resolution to coarsest resolution --- this is
  used to (linearly) interpolate scales of anchor boxes corresponding to the
  intermediate grid sizes.

  Anchors that are returned by calling the `generate` method on the returned
  MultipleGridAnchorGenerator object are always in normalized coordinates
  and clipped to the unit square: (i.e. all coordinates lie in [0, 1]x[0, 1]).

  Args:
    num_layers: integer number of grid layers to create anchors for (actual
      grid sizes passed in at generation time)
    min_scale: scale of anchors corresponding to finest resolution (float)
    max_scale: scale of anchors corresponding to coarsest resolution (float)
    scales: As list of anchor scales to use. When not None and not empty,
      min_scale and max_scale are not used.
    aspect_ratios: list or tuple of (float) aspect ratios to place on each
      grid point.
    interpolated_scale_aspect_ratio: An additional anchor is added with this
      aspect ratio and a scale interpolated between the scale for a layer
      and the scale for the next layer (1.0 for the last layer).
      This anchor is not included if this value is 0.
    base_anchor_size: base anchor size as [height, width].
      The height and width values are normalized to the minimum dimension of the
      input height and width, so that when the base anchor height equals the
      base anchor width, the resulting anchor is square even if the input image
      is not square.
    anchor_strides: list of pairs of strides in pixels (in y and x directions
      respectively). For example, setting anchor_strides=[(25, 25), (50, 50)]
      means that we want the anchors corresponding to the first layer to be
      strided by 25 pixels and those in the second layer to be strided by 50
      pixels in both y and x directions. If anchor_strides=None, they are set to
      be the reciprocal of the corresponding feature map shapes.
    anchor_offsets: list of pairs of offsets in pixels (in y and x directions
      respectively). The offset specifies where we want the center of the
      (0, 0)-th anchor to lie for each layer. For example, setting
      anchor_offsets=[(10, 10), (20, 20)]) means that we want the
      (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space
      and likewise that we want the (0, 0)-th anchor of the second layer to lie
      at (25, 25) in pixel space. If anchor_offsets=None, then they are set to
      be half of the corresponding anchor stride.
    reduce_boxes_in_lowest_layer: a boolean to indicate whether the fixed 3
      boxes per location is used in the lowest layer.

  Returns:
    a MultipleGridAnchorGenerator
  """
  if base_anchor_size is None:
    base_anchor_size = [1.0, 1.0]
  box_specs_list = []
  if scales is None or not scales:
    scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)
              for i in range(num_layers)] + [1.0]
  else:
    # Add 1.0 to the end, which will only be used in scale_next below and used
    # for computing an interpolated scale for the largest scale in the list.
    # Copy instead of `scales += [1.0]`: augmented assignment mutated the
    # caller's list in place (and raised TypeError for tuple inputs).
    scales = list(scales) + [1.0]
  for layer, scale, scale_next in zip(
      range(num_layers), scales[:-1], scales[1:]):
    layer_box_specs = []
    if layer == 0 and reduce_boxes_in_lowest_layer:
      # Fixed trio of boxes for the finest layer, per the SSD reference impl.
      layer_box_specs = [(0.1, 1.0), (scale, 2.0), (scale, 0.5)]
    else:
      for aspect_ratio in aspect_ratios:
        layer_box_specs.append((scale, aspect_ratio))
      # Add one more anchor, with a scale between the current scale, and the
      # scale for the next layer, with a specified aspect ratio (1.0 by
      # default).
      if interpolated_scale_aspect_ratio > 0.0:
        layer_box_specs.append((np.sqrt(scale*scale_next),
                                interpolated_scale_aspect_ratio))
    box_specs_list.append(layer_box_specs)
  return MultipleGridAnchorGenerator(box_specs_list, base_anchor_size,
                                     anchor_strides, anchor_offsets)
| 49.618076
| 81
| 0.651625
|
acfd6e0eee8269f1b2e1306ae0246fddbbec1146
| 626
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/scatterternary/marker/line/_coloraxis.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/scatterternary/marker/line/_coloraxis.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/scatterternary/marker/line/_coloraxis.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the ``scatterternary.marker.line.coloraxis`` property.

    Accepts a reference to a ``coloraxis`` subplot id ("coloraxis",
    "coloraxis2", ...), matched by the regex passed to the base class.
    """

    def __init__(
        self,
        plotly_name="coloraxis",
        parent_name="scatterternary.marker.line",
        **kwargs
    ):
        # Defaults for the base-class options; any of them may be
        # overridden by the caller through kwargs.
        defaults = {
            "dflt": None,
            "edit_type": "calc",
            "regex": "/^coloraxis([2-9]|[1-9][0-9]+)?$/",
            "role": "info",
        }
        options = {key: kwargs.pop(key, value) for key, value in defaults.items()}
        super(ColoraxisValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **options,
            **kwargs
        )
| 31.3
| 75
| 0.589457
|
acfd6effa7c65cb049ddce6d44a55428975befca
| 539
|
py
|
Python
|
cinema_environment/server_monolith/server_app/migrations/0017_auto_20200227_2127.py
|
AndrewMalitchuk/cinema-server-monolith
|
0002f672c3389f187c4668b7f87e9ea7273900a7
|
[
"Apache-2.0"
] | null | null | null |
cinema_environment/server_monolith/server_app/migrations/0017_auto_20200227_2127.py
|
AndrewMalitchuk/cinema-server-monolith
|
0002f672c3389f187c4668b7f87e9ea7273900a7
|
[
"Apache-2.0"
] | null | null | null |
cinema_environment/server_monolith/server_app/migrations/0017_auto_20200227_2127.py
|
AndrewMalitchuk/cinema-server-monolith
|
0002f672c3389f187c4668b7f87e9ea7273900a7
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-02-27 19:27
from django.db import migrations
class Migration(migrations.Migration):
    # Drops three denormalized columns from Ticket. The timeline_id field
    # added in migration 0016 presumably supersedes them — TODO confirm
    # against the Ticket model.

    dependencies = [
        ('server_app', '0016_ticket_timeline_id'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='ticket',
            name='cinema_id',
        ),
        migrations.RemoveField(
            model_name='ticket',
            name='date',
        ),
        migrations.RemoveField(
            model_name='ticket',
            name='film_id',
        ),
    ]
| 20.730769
| 50
| 0.543599
|
acfd7061c93b89eb63d705916e6a4047f1f3b060
| 1,140
|
py
|
Python
|
basic_elements/class_and_static_methods.py
|
ppinko/python_knowledge_library
|
089348c80e3f49a4a56839bfb921033e5386f07e
|
[
"Apache-2.0"
] | null | null | null |
basic_elements/class_and_static_methods.py
|
ppinko/python_knowledge_library
|
089348c80e3f49a4a56839bfb921033e5386f07e
|
[
"Apache-2.0"
] | null | null | null |
basic_elements/class_and_static_methods.py
|
ppinko/python_knowledge_library
|
089348c80e3f49a4a56839bfb921033e5386f07e
|
[
"Apache-2.0"
] | null | null | null |
"""
This script show an example presenting use of @staticmethod and @classmethod
"""
class MyClass:
    """Demonstrates the three kinds of methods Python supports."""

    def method(self):
        # Plain instance method: receives the instance as `self`.
        return ('instance method called', self)

    @classmethod
    def classmethod(cls):
        # Bound to the class object itself rather than to an instance.
        return ('class method called', cls)

    @staticmethod
    def staticmethod():
        # No implicit first argument at all.
        return 'static method called'
class Pizza:
    """A pizza described purely by its list of ingredients."""

    def __init__(self, ingredients):
        self.ingredients = ingredients

    def __repr__(self):
        return f'Pizza({self.ingredients!r})'

    # The classmethods below act as named alternate constructors for
    # common pizza recipes.

    @classmethod
    def margherita(cls):
        return cls(['mozzarella', 'tomatoes'])

    @classmethod
    def prosciutto(cls):
        return cls(['mozzarella', 'tomatoes', 'ham'])
##############################################################
import math
class Pizza:
    """A circular pizza with a radius and a list of ingredients.

    Demonstrates delegating an instance method (`area`) to a
    staticmethod utility (`circle_area`).
    """

    def __init__(self, radius, ingredients):
        self.radius = radius
        self.ingredients = ingredients

    def __repr__(self):
        # Single f-string instead of two implicitly-concatenated ones.
        return f'Pizza({self.radius!r}, {self.ingredients!r})'

    def area(self):
        """Return the area of this pizza."""
        return self.circle_area(self.radius)

    @staticmethod
    def circle_area(r):
        """Area of a circle of radius ``r``."""
        return r ** 2 * math.pi
| 21.923077
| 76
| 0.589474
|
acfd711ebe988cd8915dadcb5994456479c7e421
| 4,819
|
py
|
Python
|
how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py
|
mnozary/MachineLearningNotebooks
|
f5c2ccccdc3177d47f3cc7886f99f82b09f55898
|
[
"MIT"
] | 1
|
2019-12-29T00:29:34.000Z
|
2019-12-29T00:29:34.000Z
|
how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py
|
mnozary/MachineLearningNotebooks
|
f5c2ccccdc3177d47f3cc7886f99f82b09f55898
|
[
"MIT"
] | null | null | null |
how-to-use-azureml/ml-frameworks/tensorflow/training/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py
|
mnozary/MachineLearningNotebooks
|
f5c2ccccdc3177d47f3cc7886f99f82b09f55898
|
[
"MIT"
] | 1
|
2020-07-30T13:30:18.000Z
|
2020-07-30T13:30:18.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import argparse
import os
import re
import tensorflow as tf
import glob

from azureml.core import Run
from utils import load_data

print("TensorFlow version:", tf.VERSION)

# ---- Command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point')
parser.add_argument('--resume-from', type=str, default=None,
                    help='location of the model or checkpoint files from where to resume the training')
args = parser.parse_args()

previous_model_location = args.resume_from
# You can also use environment variable to get the model/checkpoint files location
# previous_model_location = os.path.expandvars(os.getenv("AZUREML_DATAREFERENCE_MODEL_LOCATION", None))

data_folder = args.data_folder
print('Data folder:', data_folder)

# ---- Data loading -----------------------------------------------------------
# load train and test set into numpy arrays
# note we scale the pixel intensity values to 0-1 (by dividing it with 255.0) so the model can converge faster.
X_train = load_data(glob.glob(os.path.join(data_folder, '**/train-images-idx3-ubyte.gz'),
                              recursive=True)[0], False) / 255.0
X_test = load_data(glob.glob(os.path.join(data_folder, '**/t10k-images-idx3-ubyte.gz'),
                             recursive=True)[0], False) / 255.0
y_train = load_data(glob.glob(os.path.join(data_folder, '**/train-labels-idx1-ubyte.gz'),
                              recursive=True)[0], True).reshape(-1)
y_test = load_data(glob.glob(os.path.join(data_folder, '**/t10k-labels-idx1-ubyte.gz'),
                             recursive=True)[0], True).reshape(-1)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep='\n')

training_set_size = X_train.shape[0]

# ---- Network hyperparameters ------------------------------------------------
n_inputs = 28 * 28
n_h1 = 100
n_h2 = 100
n_outputs = 10
learning_rate = 0.01
n_epochs = 20
batch_size = 50

# ---- Graph construction (TF1 style) -----------------------------------------
with tf.name_scope('network'):
    # construct the DNN: two ReLU hidden layers + linear output logits
    X = tf.placeholder(tf.float32, shape=(None, n_inputs), name='X')
    y = tf.placeholder(tf.int64, shape=(None), name='y')
    h1 = tf.layers.dense(X, n_h1, activation=tf.nn.relu, name='h1')
    h2 = tf.layers.dense(h1, n_h2, activation=tf.nn.relu, name='h2')
    output = tf.layers.dense(h2, n_outputs, name='output')

with tf.name_scope('train'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=output)
    loss = tf.reduce_mean(cross_entropy, name='loss')
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss)

with tf.name_scope('eval'):
    correct = tf.nn.in_top_k(output, y, 1)
    acc_op = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()

# start an Azure ML run
run = Run.get_context()

# ---- Training loop with checkpoint resume -----------------------------------
with tf.Session() as sess:
    start_epoch = 0
    if previous_model_location:
        # Restore the latest checkpoint and parse the epoch number out of
        # its filename so training resumes from where it stopped.
        checkpoint_file_path = tf.train.latest_checkpoint(previous_model_location)
        saver.restore(sess, checkpoint_file_path)
        checkpoint_filename = os.path.basename(checkpoint_file_path)
        num_found = re.search(r'\d+', checkpoint_filename)
        if num_found:
            start_epoch = int(num_found.group(0))
            print("Resuming from epoch {}".format(str(start_epoch)))
    else:
        init.run()

    for epoch in range(start_epoch, n_epochs):
        # randomly shuffle training set
        indices = np.random.permutation(training_set_size)
        X_train = X_train[indices]
        y_train = y_train[indices]

        # batch index
        b_start = 0
        b_end = b_start + batch_size
        for _ in range(training_set_size // batch_size):
            # get a batch
            X_batch, y_batch = X_train[b_start: b_end], y_train[b_start: b_end]

            # update batch index for the next batch
            b_start = b_start + batch_size
            b_end = min(b_start + batch_size, training_set_size)

            # train
            sess.run(train_op, feed_dict={X: X_batch, y: y_batch})

        # evaluate training set (on the last batch only)
        acc_train = acc_op.eval(feed_dict={X: X_batch, y: y_batch})
        # evaluate validation set
        acc_val = acc_op.eval(feed_dict={X: X_test, y: y_test})

        # log accuracies
        # float() replaces np.float: the alias was deprecated in NumPy 1.20
        # and removed in 1.24, and was simply the builtin float anyway.
        run.log('training_acc', float(acc_train))
        run.log('validation_acc', float(acc_val))
        print(epoch, '-- Training accuracy:', acc_train, '\b Validation accuracy:', acc_val)
        # (an unused y_hat = np.argmax(output.eval(...)) computation was
        # removed here; it ran a full extra forward pass every epoch)

        if epoch % 5 == 0:
            saver.save(sess, './outputs/', global_step=epoch)

        # saving only half of the model and resuming again from same epoch
        if not previous_model_location and epoch == 10:
            break

    run.log('final_acc', float(acc_val))
| 36.78626
| 111
| 0.671716
|
acfd72291a78b67d9058d1e796ef6957309f9ef1
| 92
|
py
|
Python
|
demo2/demo2_app/apps.py
|
mpasternak/pytest-django-pytest-splinter-test
|
843577e05a91545e4ff1d687b3fd56f25e0e22d3
|
[
"Unlicense"
] | null | null | null |
demo2/demo2_app/apps.py
|
mpasternak/pytest-django-pytest-splinter-test
|
843577e05a91545e4ff1d687b3fd56f25e0e22d3
|
[
"Unlicense"
] | null | null | null |
demo2/demo2_app/apps.py
|
mpasternak/pytest-django-pytest-splinter-test
|
843577e05a91545e4ff1d687b3fd56f25e0e22d3
|
[
"Unlicense"
] | null | null | null |
from django.apps import AppConfig
class Demo2AppConfig(AppConfig):
    """Django application configuration for the ``demo2_app`` app."""

    # Dotted module path Django uses to locate the application.
    name = 'demo2_app'
| 15.333333
| 33
| 0.76087
|
acfd72df39a39dc4804cfb04bf6ec4ed74abe59a
| 1,077
|
py
|
Python
|
cambiaahora/configuracion/migrations/0002_logoapoyan.py
|
shiminasai/plataforma_FADCANIC
|
14831525e4104da7f05ce87e59238f1e4193c184
|
[
"MIT"
] | null | null | null |
cambiaahora/configuracion/migrations/0002_logoapoyan.py
|
shiminasai/plataforma_FADCANIC
|
14831525e4104da7f05ce87e59238f1e4193c184
|
[
"MIT"
] | null | null | null |
cambiaahora/configuracion/migrations/0002_logoapoyan.py
|
shiminasai/plataforma_FADCANIC
|
14831525e4104da7f05ce87e59238f1e4193c184
|
[
"MIT"
] | 2
|
2015-05-19T20:50:41.000Z
|
2015-05-19T20:51:04.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import sorl.thumbnail.fields
from django.conf import settings
import cambiaahora.utils
class Migration(migrations.Migration):
    """Create the ``LogoApoyan`` model (logos of supporting organizations)."""

    # Must run after the initial 'configuracion' migration and after the
    # (swappable) user model's app is migrated, since 'user' is an FK to it.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('configuracion', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='LogoApoyan',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre', models.CharField(max_length=250, verbose_name='siglas organizmo')),
                # Thumbnail-aware image field; upload path is computed by the
                # project helper cambiaahora.utils.get_file_path.
                ('foto', sorl.thumbnail.fields.ImageField(upload_to=cambiaahora.utils.get_file_path, null=True, verbose_name='Foto', blank=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Logo apoyan',
                'verbose_name_plural': 'Logo apoyan',
            },
        ),
    ]
| 33.65625
| 146
| 0.624884
|
acfd730c17d5584c2b0075914baf5b4835c76276
| 67
|
py
|
Python
|
blesuite/replay/btsnoop/android/__init__.py
|
jreynders/BLESuite-1
|
1c3c15fc2d4e30c3f9c1a15e0268cae84685784b
|
[
"MIT"
] | 198
|
2016-08-04T05:45:38.000Z
|
2022-02-17T08:30:58.000Z
|
blesuite/replay/btsnoop/android/__init__.py
|
jreynders/BLESuite-1
|
1c3c15fc2d4e30c3f9c1a15e0268cae84685784b
|
[
"MIT"
] | 13
|
2018-02-04T14:16:16.000Z
|
2020-10-09T02:16:24.000Z
|
blesuite/replay/btsnoop/android/__init__.py
|
jreynders/BLESuite-1
|
1c3c15fc2d4e30c3f9c1a15e0268cae84685784b
|
[
"MIT"
] | 57
|
2016-08-08T04:24:04.000Z
|
2022-01-24T08:43:02.000Z
|
from . import executor
from . import phone
from . import snoopphone
| 22.333333
| 24
| 0.791045
|
acfd7386dd9b39cbe964add9b30d0613bb6f3fa0
| 5,898
|
py
|
Python
|
needlestack/servicers/settings.py
|
needlehaystack/needlestack
|
e00529a2a7c2d85059936a85f54dfb55e515b6ef
|
[
"Apache-2.0"
] | 3
|
2019-10-03T22:15:21.000Z
|
2022-02-08T09:05:41.000Z
|
needlestack/servicers/settings.py
|
cungtv/needlestack
|
e00529a2a7c2d85059936a85f54dfb55e515b6ef
|
[
"Apache-2.0"
] | 1
|
2021-04-30T21:08:47.000Z
|
2021-04-30T21:08:47.000Z
|
needlestack/servicers/settings.py
|
cungtv/needlestack
|
e00529a2a7c2d85059936a85f54dfb55e515b6ef
|
[
"Apache-2.0"
] | 2
|
2019-08-02T19:13:09.000Z
|
2019-10-25T01:47:17.000Z
|
from typing import List, Optional
import grpc
from grpc import ServerCredentials, ChannelCredentials
class BaseConfig(object):
    """Base configuration for gRPC services
    Attributes:
        DEBUG: Attach a stream handler to console for logger
        DEBUG_LOG_FORMAT: Format string for debug logger
        LOG_LEVEL: Level for logger
        LOG_FORMAT_DATE: Format string for date
        LOG_FILE: Filepath to log file
        LOG_FILE_BACKUPS: Number of log files to keep in rotation
        LOG_FILE_LOG_FORMAT: Format string for file logger
        LOG_FILE_MAX_BYTES: Max byte size for log file
        MAX_WORKERS: Number of worker threads per gRPC server
        HOSTNAME: Hostname of node
        SERVICER_PORT: Port of gRPC server
        MUTUAL_TLS: Require server and client to authenticate each other the CA
        SSL_CA_CERT_CHAIN: Certificate authority certificate chain bytes
        SSL_CA_CERT_CHAIN_FILE: Certificate authority certificate chain file
        SSL_SERVER_PRIVATE_KEY: Server private key bytes
        SSL_SERVER_PRIVATE_KEY_FILE: Server private key file
        SSL_SERVER_CERT_CHAIN: Server certificate chain bytes
        SSL_SERVER_CERT_CHAIN_FILE: Server certificate chain file
        SSL_CLIENT_PRIVATE_KEY: Client private key bytes
        SSL_CLIENT_PRIVATE_KEY_FILE: Client private key file
        SSL_CLIENT_CERT_CHAIN: Client certificate chain bytes
        SSL_CLIENT_CERT_CHAIN_FILE: Client certificate chain file
        CLUSTER_NAME: Name for Needlestack cluster
        ZOOKEEPER_ROOT: Root path on Zookeeper
        ZOOKEEPER_HOSTS: List of Zookeeper host for cluster manager
        hostport: Hostport to gRPC server
        use_mutual_tls: Should server and clients be authenticated
        use_server_ssl: Should server be authenticated
        ssl_server_credentials: gRPC SSL server credentials
    """
    DEBUG = False
    DEBUG_LOG_FORMAT = (
        "%(asctime)s [%(name)s] [%(threadName)-10s] [%(levelname)s] - %(message)s"
    )
    LOG_LEVEL = "WARNING"
    LOG_FORMAT_DATE = "%Y-%m-%d %H:%M:%S"
    LOG_FILE: Optional[str] = None
    # Annotation-only attributes below have no default: concrete subclasses
    # (e.g. TestConfig) are expected to supply the value.
    LOG_FILE_BACKUPS: int
    LOG_FILE_LOG_FORMAT = "%(asctime)s [%(name)s] [%(thread)d] [%(process)d] [%(levelname)s] - %(message)s"
    LOG_FILE_MAX_BYTES: int
    MAX_WORKERS: int
    HOSTNAME: str
    SERVICER_PORT: int
    MUTUAL_TLS: bool = False
    # Each SSL_* credential may be supplied either inline (bytes) or via a
    # companion *_FILE path; _get_credential prefers inline bytes.
    SSL_CA_CERT_CHAIN: Optional[bytes] = None
    SSL_CA_CERT_CHAIN_FILE: Optional[str] = None
    SSL_SERVER_PRIVATE_KEY: Optional[bytes] = None
    SSL_SERVER_PRIVATE_KEY_FILE: Optional[str] = None
    SSL_SERVER_CERT_CHAIN: Optional[bytes] = None
    SSL_SERVER_CERT_CHAIN_FILE: Optional[str] = None
    SSL_CLIENT_PRIVATE_KEY: Optional[bytes] = None
    SSL_CLIENT_PRIVATE_KEY_FILE: Optional[str] = None
    SSL_CLIENT_CERT_CHAIN: Optional[bytes] = None
    SSL_CLIENT_CERT_CHAIN_FILE: Optional[str] = None
    CLUSTER_NAME: str
    ZOOKEEPER_ROOT = "/needlestack"
    ZOOKEEPER_HOSTS: List[str]
    @property
    def hostport(self) -> str:
        """Return the ``host:port`` address of this node's gRPC server."""
        return f"{self.HOSTNAME}:{self.SERVICER_PORT}"
    @property
    def use_mutual_tls(self) -> bool:
        """True when clients must also present certificates (mTLS)."""
        return self.MUTUAL_TLS
    @property
    def use_server_ssl(self) -> bool:
        """True when both a server key and a server cert are configured (inline or file)."""
        return (
            self.SSL_SERVER_PRIVATE_KEY is not None
            or self.SSL_SERVER_PRIVATE_KEY_FILE is not None
        ) and (
            self.SSL_SERVER_CERT_CHAIN is not None
            or self.SSL_SERVER_CERT_CHAIN_FILE is not None
        )
    @property
    def use_channel_ssl(self) -> bool:
        """True when a CA chain is configured, i.e. client channels should use SSL."""
        return (
            self.SSL_CA_CERT_CHAIN is not None
            or self.SSL_CA_CERT_CHAIN_FILE is not None
        )
    @property
    def ca_certificate(self) -> Optional[bytes]:
        return self._get_credential("SSL_CA_CERT_CHAIN")
    @property
    def server_private_key(self) -> Optional[bytes]:
        return self._get_credential("SSL_SERVER_PRIVATE_KEY")
    @property
    def server_certificate(self) -> Optional[bytes]:
        return self._get_credential("SSL_SERVER_CERT_CHAIN")
    @property
    def channel_private_key(self) -> Optional[bytes]:
        return self._get_credential("SSL_CLIENT_PRIVATE_KEY")
    @property
    def channel_certificate(self) -> Optional[bytes]:
        return self._get_credential("SSL_CLIENT_CERT_CHAIN")
    @property
    def ssl_server_credentials(self) -> Optional[ServerCredentials]:
        """Build gRPC server credentials, or None when server SSL is not configured."""
        if self.use_server_ssl:
            pairs = [(self.server_private_key, self.server_certificate)]
            # The CA chain is only passed when clients must authenticate (mTLS).
            ca_certificate = self.ca_certificate if self.use_mutual_tls else None
            return grpc.ssl_server_credentials(
                private_key_certificate_chain_pairs=pairs,
                root_certificates=ca_certificate,
            )
        else:
            return None
    @property
    def ssl_channel_credentials(self) -> Optional[ChannelCredentials]:
        """Build gRPC client-channel credentials, or None when channel SSL is off."""
        if self.use_channel_ssl:
            return grpc.ssl_channel_credentials(
                root_certificates=self.ca_certificate,
                private_key=self.channel_private_key,
                certificate_chain=self.channel_certificate,
            )
        else:
            return None
    def _get_credential(self, name: str) -> Optional[bytes]:
        """Resolve credential *name*: inline bytes first, else read ``<name>_FILE``, else None."""
        data = getattr(self, name, None)
        filename = getattr(self, f"{name}_FILE", None)
        if data:
            return data
        elif filename:
            with open(filename, "rb") as f:
                return f.read()
        else:
            return None
class TestConfig(BaseConfig):
    """Configs for local test environment"""
    # Verbose console + file logging for local debugging.
    DEBUG = True
    LOG_LEVEL = "DEBUG"
    LOG_FILE = "/tmp/needlestack.log"
    LOG_FILE_MAX_BYTES = 1 * 1024 ** 2  # 1 MB
    LOG_FILE_BACKUPS = 2
    MAX_WORKERS = 2
    HOSTNAME = "localhost"
    SERVICER_PORT = 50051
    CLUSTER_NAME = "test_needlestack"
    # Three-node local ZooKeeper ensemble (e.g. docker-compose service names).
    ZOOKEEPER_HOSTS = ["zoo1:2181", "zoo2:2181", "zoo3:2181"]
| 33.896552
| 107
| 0.676331
|
acfd74b4187205b67451a1973ccaff3b95a332e6
| 3,378
|
py
|
Python
|
iast/views/api_route_related_request.py
|
luzhongyang/DongTai-webapi
|
f07b2b1bc1222999d0bb7e3300e65c953ee966f5
|
[
"Apache-2.0"
] | 6
|
2021-09-01T07:37:37.000Z
|
2022-02-10T08:28:47.000Z
|
iast/views/api_route_related_request.py
|
luzhongyang/DongTai-webapi
|
f07b2b1bc1222999d0bb7e3300e65c953ee966f5
|
[
"Apache-2.0"
] | 51
|
2021-11-09T09:19:05.000Z
|
2022-02-10T02:37:04.000Z
|
iast/views/api_route_related_request.py
|
luzhongyang/DongTai-webapi
|
f07b2b1bc1222999d0bb7e3300e65c953ee966f5
|
[
"Apache-2.0"
] | 21
|
2021-09-01T06:32:19.000Z
|
2022-03-03T03:23:37.000Z
|
######################################################################
# @author : bidaya0 (bidaya0@$HOSTNAME)
# @file : api_route_related_request
# @created : Saturday Aug 21, 2021 13:54:14 CST
#
# @description :
######################################################################
from dongtai.models.api_route import IastApiRoute, IastApiMethod, IastApiRoute, HttpMethod, IastApiResponse, IastApiMethodHttpMethodRelation, IastApiParameter
from dongtai.models.agent_method_pool import MethodPool
from iast.base.project_version import get_project_version, get_project_version_by_id
from dongtai.endpoint import R, UserEndPoint
from dongtai.models.agent import IastAgent
from django.utils.translation import gettext_lazy as _
from django.db.models import Q
from django.forms.models import model_to_dict
from iast.utils import sha1
from iast.utils import extend_schema_with_envcheck, get_response_serializer
from rest_framework import serializers
class ApiRouteCoverRelationSerializer(serializers.ModelSerializer):
    """Serialize every field of a MethodPool record (used as the GET response schema)."""
    class Meta:
        model = MethodPool
        fields = serializers.ALL_FIELDS
# Pre-built response schema instance for the OpenAPI/extend_schema decorator.
_GetResponseSerializer = get_response_serializer(ApiRouteCoverRelationSerializer())
class ApiRouteRelationRequest(UserEndPoint):
    """Return the most recent captured request (MethodPool row) hitting a given API route."""

    @extend_schema_with_envcheck(
        [{
            'name': 'api_route_id',
            'type': int
        }, {
            'name': 'project_id',
            'type': int
        }, {
            'name': 'version_id',
            'type': int
        }],
        tags=[_('API Route')],
        summary=_('API Route Relation Request'),
        description=
        _("Get the coverrate of the project corresponding to the specified id."
          ),
        response_schema=_GetResponseSerializer,
    )
    def get(self, request):
        """Look up the latest MethodPool entry matching ``api_route_id``.

        Query params: ``api_route_id`` (required in practice), ``project_id``
        and ``version_id`` (scope the search to a project version's agents).
        Returns ``R.success`` with the row as a dict (empty dict when no match),
        or ``R.failure`` on bad parameters / unknown route.
        """
        try:
            # page_size / page_index are parsed for validation only; the query
            # below always returns at most one row.
            page_size = int(request.query_params.get('page_size', 1))
            page_index = int(request.query_params.get('page_index', 1))
            api_route_id = int(request.query_params.get('api_route_id', 1))
            api_route = IastApiRoute.objects.filter(pk=api_route_id).first()
            if api_route is None:
                # NOTE(review): "Fould" is a typo in the msgid; left unchanged
                # so existing translation catalogs still match.
                return R.failure(msg=_("API not Fould"))
            project_id = int(request.query_params.get('project_id', None))
            auth_users = self.get_auth_users(request.user)
            version_id = int(request.query_params.get('version_id', None))
        except (TypeError, ValueError):
            # int(None) raises TypeError, int('abc') raises ValueError; a bare
            # ``except:`` here would also swallow SystemExit/KeyboardInterrupt.
            return R.failure(_("Parameter error"))
        if project_id:
            if not version_id:
                current_project_version = get_project_version(
                    project_id, auth_users)
            else:
                current_project_version = get_project_version_by_id(version_id)
            agents = IastAgent.objects.filter(
                user__in=auth_users,
                bind_project_id=project_id,
                project_version_id=current_project_version.get(
                    "version_id", 0)).values("id")
        q = Q()
        # Loop variables renamed from ``_`` to avoid shadowing the gettext alias.
        q = q & Q(agent_id__in=[agent['id'] for agent in agents]) if project_id else q
        q = q & Q(uri_sha1=sha1(api_route.path))
        q = q & Q(
            http_method__in=[hm.method for hm in api_route.method.http_method.all()])
        method = MethodPool.objects.filter(q).order_by('-update_time')[0:1].values()
        data = list(method)[0] if method else {}
        return R.success(data=data)
| 42.225
| 158
| 0.625222
|
acfd756432936301a9d95da63d2b71ea9f2b3fe9
| 7,716
|
py
|
Python
|
pythalesians/market/loaders/lowlevel/bbg/loaderbbg.py
|
PauloRui/pythalesians
|
a32f884a83476a6a6e7e77aa1a3f5f53468bad66
|
[
"Apache-2.0"
] | 1
|
2021-07-05T13:21:28.000Z
|
2021-07-05T13:21:28.000Z
|
pythalesians/market/loaders/lowlevel/bbg/loaderbbg.py
|
PauloRui/pythalesians
|
a32f884a83476a6a6e7e77aa1a3f5f53468bad66
|
[
"Apache-2.0"
] | null | null | null |
pythalesians/market/loaders/lowlevel/bbg/loaderbbg.py
|
PauloRui/pythalesians
|
a32f884a83476a6a6e7e77aa1a3f5f53468bad66
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'saeedamen' # Saeed Amen / saeed@thalesians.com
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
LoaderBBG
Abstract class for download of Bloomberg daily, intraday data and reference data.
Implemented by
- LoaderBBGCOM (old style Windows 32bit COM access to Bloomberg)
- LoaderBBGOpen (adapted version of new Bloomberg Open API for Python - recommended - although requires compilation)
"""
import datetime
import abc
import pandas
from pythalesians.util.loggermanager import LoggerManager
from pythalesians.market.loaders.lowlevel.loadertemplate import LoaderTemplate
class LoaderBBG(LoaderTemplate):
    """Abstract Bloomberg loader: routes a TimeSeriesRequest to the right
    download path (daily/historical, intraday, tick or reference data) and
    normalizes the returned columns to ``<ticker>.<field>``.  Concrete
    subclasses implement the ``download_*`` methods.
    """
    def __init__(self):
        super(LoaderBBG, self).__init__()
        self.logger = LoggerManager().getLogger(__name__)
    # implement method in abstract superclass
    def load_ticker(self, time_series_request):
        """
        load_ticker - Retrieves market data from external data source (in this case Bloomberg)
        Parameters
        ----------
        time_series_request : TimeSeriesRequest
            contains all the various parameters detailing time series start and finish, tickers etc
        Returns
        -------
        DataFrame
        """
        time_series_request_vendor = self.construct_vendor_time_series_request(time_series_request)
        data_frame = None
        self.logger.info("Request Bloomberg data")
        # do we need daily or intraday data?
        if (time_series_request.freq in ['daily', 'weekly', 'monthly', 'quarterly', 'yearly']):
            # for events times/dates separately needs ReferenceDataRequest (when specified)
            if 'release-date-time-full' in time_series_request.fields:
                # experimental
                datetime_data_frame = self.get_reference_data(time_series_request_vendor, time_series_request)
                # remove fields 'release-date-time-full' from our request (and the associated field in the vendor)
                # NOTE(review): this mutates both request objects in place, so
                # callers should not reuse them afterwards.
                index = time_series_request.fields.index('release-date-time-full')
                time_series_request_vendor.fields.pop(index)
                time_series_request.fields.pop(index)
                # download all the other event fields (uses HistoricalDataRequest to Bloomberg)
                # concatenate with date time fields
                if len(time_series_request_vendor.fields) > 0:
                    events_data_frame = self.get_daily_data(time_series_request, time_series_request_vendor)
                    # Re-attach the original index column after the side-by-side concat.
                    col = events_data_frame.index.name
                    events_data_frame = events_data_frame.reset_index(drop = False)
                    data_frame = pandas.concat([events_data_frame, datetime_data_frame], axis = 1)
                    temp = data_frame[col]
                    del data_frame[col]
                    data_frame.index = temp
                else:
                    data_frame = datetime_data_frame
            # for all other daily/monthly/quarter data, we can use HistoricalDataRequest to Bloomberg
            else:
                data_frame = self.get_daily_data(time_series_request, time_series_request_vendor)
        # assume one ticker only
        # for intraday data we use IntradayDataRequest to Bloomberg
        if (time_series_request.freq in ['tick', 'intraday', 'second', 'minute', 'hourly']):
            # Intraday requests support a single ticker only.
            time_series_request_vendor.tickers = time_series_request_vendor.tickers[0]
            if time_series_request.freq in ['tick', 'second']:
                data_frame = self.download_tick(time_series_request_vendor)
            else:
                data_frame = self.download_intraday(time_series_request_vendor)
            if data_frame is not None:
                if data_frame.empty:
                    self.logger.info("No tickers returned for: " + time_series_request_vendor.tickers)
                    return None
                # Localize to UTC and prefix columns with the (single) requested ticker.
                cols = data_frame.columns.values
                data_frame = data_frame.tz_localize('UTC')
                cols = time_series_request.tickers[0] + "." + cols
                data_frame.columns = cols
        self.logger.info("Completed request from Bloomberg.")
        return data_frame
    def get_daily_data(self, time_series_request, time_series_request_vendor):
        """Download daily data and rename columns to ``<ticker>.<field>``.

        Returns None when the vendor reply is empty.
        """
        data_frame = self.download_daily(time_series_request_vendor)
        # convert from vendor to Thalesians tickers/fields
        if data_frame is not None:
            if data_frame.empty:
                self.logger.info("No tickers returned for...")
                try:
                    self.logger.info(str(time_series_request_vendor.tickers))
                # NOTE(review): bare except is deliberate best-effort logging,
                # but it also swallows SystemExit/KeyboardInterrupt — consider
                # narrowing to ``except Exception``.
                except: pass
                return None
            # Vendor frames come back with a (field, ticker) column MultiIndex.
            returned_fields = data_frame.columns.get_level_values(0)
            returned_tickers = data_frame.columns.get_level_values(1)
            # TODO if empty try downloading again a year later
            fields = self.translate_from_vendor_field(returned_fields, time_series_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, time_series_request)
            ticker_combined = []
            for i in range(0, len(fields)):
                ticker_combined.append(tickers[i] + "." + fields[i])
            data_frame.columns = ticker_combined
            data_frame.index.name = 'Date'
        return data_frame
    def get_reference_data(self, time_series_request_vendor, time_series_request):
        """Download reference data (e.g. event release date-times), extending the
        request's finish date one year ahead to capture future scheduled events.
        """
        end = datetime.datetime.today()
        end = end.replace(year = end.year + 1)
        time_series_request_vendor.finish_date = end
        self.logger.debug("Requesting ref for " + time_series_request_vendor.tickers[0] + " etc.")
        data_frame = self.download_ref(time_series_request_vendor)
        self.logger.debug("Waiting for ref...")
        # convert from vendor to Thalesians tickers/fields
        if data_frame is not None:
            returned_fields = data_frame.columns.get_level_values(0)
            returned_tickers = data_frame.columns.get_level_values(1)
        if data_frame is not None:
            # TODO if empty try downloading again a year later
            fields = self.translate_from_vendor_field(returned_fields, time_series_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, time_series_request)
            ticker_combined = []
            for i in range(0, len(fields)):
                ticker_combined.append(tickers[i] + "." + fields[i])
            data_frame.columns = ticker_combined
            # TODO coerce will be deprecated from pandas
            # NOTE(review): DataFrame.convert_objects was removed in pandas 0.25;
            # this line raises AttributeError on modern pandas and needs porting
            # to pandas.to_numeric / to_datetime — confirm which columns must be
            # date-coerced before changing.
            data_frame = data_frame.convert_objects(convert_dates = 'coerce', convert_numeric= 'coerce')
        return data_frame
    # implement method in abstract superclass
    @abc.abstractmethod
    def kill_session(self):
        return
    @abc.abstractmethod
    def download_tick(self, time_series_request):
        return
    @abc.abstractmethod
    def download_intraday(self, time_series_request):
        return
    @abc.abstractmethod
    def download_daily(self, time_series_request):
        return
    @abc.abstractmethod
    def download_ref(self, time_series_request):
        return
| 38.19802
| 121
| 0.665759
|
acfd7708339cc1c0fb3fee5701925edd3602d27d
| 540,131
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs_/link_delay/state/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs_/link_delay/state/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs_/link_delay/state/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
# Compatibility shim: under Python 3, alias builtins as __builtin__ and map
# the removed ``long`` type to ``int`` so auto-generated pyangbind code runs
# unchanged on both interpreters.
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isis-neighbor-attribute/neighbors/neighbor/subTLVs/subTLVs/link-delay/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of IS Extended Reachability sub-TLV 33.
"""
__slots__ = ("_path_helper", "_extmethods", "__subtlv_type", "__a_bit", "__delay")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__subtlv_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="subtlv-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
self.__a_bit = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="a-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__delay = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isis-neighbor-attribute",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"link-delay",
"state",
]
def _get_subtlv_type(self):
"""
Getter method for subtlv_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/subtlv_type (identityref)
YANG Description: The type of subTLV being described. The type of subTLV is
expressed as a canonical name.
"""
return self.__subtlv_type
def _set_subtlv_type(self, v, load=False):
"""
Setter method for subtlv_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/subtlv_type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subtlv_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subtlv_type() directly.
YANG Description: The type of subTLV being described. The type of subTLV is
expressed as a canonical name.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="subtlv-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subtlv_type must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ISIS_TLV22_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV22_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 
'oc-isis-lsdb-types:TLV22_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 
'oc-isis-lsdb-types:TLV22_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', 
'@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV23_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV23_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_MIN_MAX_LINK_DELAY': 
{'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV135_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV135_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV141_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV141_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_ADMIN_GROUP': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 
'oc-isis-lsdb-types:TLV141_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 
'oc-isis-lsdb-types:TLV141_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_AVAILABLE_BANDWIDTH': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV222_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV222_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_MAX_RESERVABLE_BANDWIDTH': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_LINK_PROTECTION_TYPE': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV223_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV223_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_EXTENDED_ADMIN_GROUP': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 
'ISIS_TLV235_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV235_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV236_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV236_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV237_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV237_SUBTLVS_TYPE': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV242_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV242_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV242_SR_CAPABILITY': 
{'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV242_SR_CAPABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV242_SR_ALGORITHM': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV242_SR_ALGORITHM': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}},), is_leaf=True, yang_name="subtlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__subtlv_type = t
if hasattr(self, "_set"):
self._set()
def _unset_subtlv_type(self):
self.__subtlv_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="subtlv-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
def _get_a_bit(self):
    """
    Getter for the a_bit leaf (YANG type: boolean).

    Mapped from the YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/a_bit

    From the YANG model: the A bit is set when the measured value of this
    parameter exceeds its configured maximum threshold, and cleared when
    the measured value falls below its configured reuse threshold.
    """
    return self.__a_bit
def _set_a_bit(self, v, load=False):
    """
    Setter for the a_bit leaf (YANG type: boolean).

    Mapped from the YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/a_bit

    Because this leaf is read-only (config: false) in the source YANG
    module, _set_a_bit is considered a private method; backends looking
    to populate this variable should do so by calling
    thisObj._set_a_bit() directly.

    From the YANG model: the A bit is set when the measured value of this
    parameter exceeds its configured maximum threshold, and cleared when
    the measured value falls below its configured reuse threshold.
    """
    # Give typed wrapper values a chance to coerce themselves first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the incoming value in the typed dynamic class; this
        # validates it against the YANG boolean type.
        coerced = YANGDynClass(
            v,
            base=YANGBool,
            is_leaf=True,
            yang_name="a-bit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise with the structured error payload pyangbind callers expect.
        raise ValueError(
            {
                "error-string": """a_bit must be of a type compatible with boolean""",
                "defined-type": "boolean",
                "generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="a-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
            }
        )
    self.__a_bit = coerced
    # Notify the parent object, when it supports change notification.
    if hasattr(self, "_set"):
        self._set()
def _unset_a_bit(self):
    """Reset the a_bit leaf to a fresh, unset typed default."""
    # Rebuild the YANGBool-backed dynamic class with the same metadata
    # used at initialization time, discarding any previously set value.
    self.__a_bit = YANGDynClass(
        base=YANGBool,
        is_leaf=True,
        yang_name="a-bit",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="boolean",
        is_config=False,
    )
def _get_delay(self):
    """
    Getter for the delay leaf (YANG type: uint32).

    Mapped from the YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/delay

    From the YANG model: average link delay value (in microseconds)
    between two directly connected IS-IS neighbors over a configurable
    interval.
    """
    return self.__delay
def _set_delay(self, v, load=False):
    """
    Setter for the delay leaf (YANG type: uint32).

    Mapped from the YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/delay

    Because this leaf is read-only (config: false) in the source YANG
    module, _set_delay is considered a private method; backends looking
    to populate this variable should do so by calling
    thisObj._set_delay() directly.

    From the YANG model: average link delay value (in microseconds)
    between two directly connected IS-IS neighbors over a configurable
    interval.
    """
    # Give typed wrapper values a chance to coerce themselves first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the incoming value in the typed dynamic class; the
        # restricted class enforces the uint32 range 0..4294967295.
        coerced = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="delay",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Re-raise with the structured error payload pyangbind callers expect.
        raise ValueError(
            {
                "error-string": """delay must be of a type compatible with uint32""",
                "defined-type": "uint32",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
            }
        )
    self.__delay = coerced
    # Notify the parent object, when it supports change notification.
    if hasattr(self, "_set"):
        self._set()
def _unset_delay(self):
    """Reset the delay leaf to a fresh, unset typed default."""
    # Rebuild the range-restricted uint32 dynamic class with the same
    # metadata used at initialization time, discarding any set value.
    self.__delay = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..4294967295"]},
            int_size=32,
        ),
        is_leaf=True,
        yang_name="delay",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="uint32",
        is_config=False,
    )
# Expose the leaves as read-only properties (getter only): every leaf in
# this container is state data (is_config=False), so no setters are bound.
subtlv_type = __builtin__.property(_get_subtlv_type)
a_bit = __builtin__.property(_get_a_bit)
delay = __builtin__.property(_get_delay)
# Ordered mapping of element name -> property, preserving the order in
# which the leaves are declared above.
_pyangbind_elements = OrderedDict(
    [("subtlv_type", subtlv_type), ("a_bit", a_bit), ("delay", delay)]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isis-neighbor-attribute/neighbors/neighbor/subTLVs/subTLVs/link-delay/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of IS Extended Reachability sub-TLV 33.
"""
__slots__ = ("_path_helper", "_extmethods", "__subtlv_type", "__a_bit", "__delay")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__subtlv_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="subtlv-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
self.__a_bit = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="a-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__delay = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isis-neighbor-attribute",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"link-delay",
"state",
]
    def _get_subtlv_type(self):
        """
        Getter method for subtlv_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/subtlv_type (identityref)

        Returns the YANGDynClass-wrapped value stored on this container;
        no validation is performed on read.

        YANG Description: The type of subTLV being described. The type of subTLV is
        expressed as a canonical name.
        """
        return self.__subtlv_type
def _set_subtlv_type(self, v, load=False):
"""
Setter method for subtlv_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/subtlv_type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subtlv_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subtlv_type() directly.
YANG Description: The type of subTLV being described. The type of subTLV is
expressed as a canonical name.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="subtlv-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subtlv_type must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ISIS_TLV22_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV22_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 
'oc-isis-lsdb-types:TLV22_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 
'oc-isis-lsdb-types:TLV22_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', 
'@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV22_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV22_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV23_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV23_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_MIN_MAX_LINK_DELAY': 
{'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV23_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV23_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV135_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV135_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV135_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV135_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV141_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV141_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_ADMIN_GROUP': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 
'oc-isis-lsdb-types:TLV141_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 
'oc-isis-lsdb-types:TLV141_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_AVAILABLE_BANDWIDTH': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV141_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV141_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV222_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV222_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_MAX_RESERVABLE_BANDWIDTH': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_LINK_PROTECTION_TYPE': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV222_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV222_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV223_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV223_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_IPV4_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_IPV4_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_MAX_LINK_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_MAX_RESERVABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_UNRESERVED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_IPV6_INTERFACE_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_IPV6_NEIGHBOR_ADDRESS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_EXTENDED_ADMIN_GROUP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_EXTENDED_ADMIN_GROUP': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_TE_DEFAULT_METRIC': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_LINK_ATTRIBUTES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_LINK_PROTECTION_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_BANDWIDTH_CONSTRAINTS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_UNCONSTRAINED_LSP': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_ADJ_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_ADJ_LAN_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_MIN_MAX_LINK_DELAY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_LINK_DELAY_VARIATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_LINK_LOSS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_RESIDUAL_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_AVAILABLE_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV223_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV223_UTILIZED_BANDWIDTH': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 
'ISIS_TLV235_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV235_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV235_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV235_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV236_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV236_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV236_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV236_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV237_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV237_SUBTLVS_TYPE': {'@module': 
'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_TAG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_TAG64': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_PREFIX_SID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_PREFIX_FLAGS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_IPV4_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV237_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV237_IPV6_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_TLV242_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_TLV242_SUBTLVS_TYPE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV242_SR_CAPABILITY': 
{'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV242_SR_CAPABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'TLV242_SR_ALGORITHM': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:TLV242_SR_ALGORITHM': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}},), is_leaf=True, yang_name="subtlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__subtlv_type = t
if hasattr(self, "_set"):
self._set()
def _unset_subtlv_type(self):
self.__subtlv_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV22_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV22_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV23_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV23_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV135_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV135_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV141_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV141_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV222_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV222_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV223_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV4_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_LINK_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MAX_RESERVABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNRESERVED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_INTERFACE_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_IPV6_NEIGHBOR_ADDRESS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_EXTENDED_ADMIN_GROUP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_TE_DEFAULT_METRIC": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_ATTRIBUTES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_PROTECTION_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_BANDWIDTH_CONSTRAINTS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UNCONSTRAINED_LSP": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_ADJ_LAN_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_MIN_MAX_LINK_DELAY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_DELAY_VARIATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_LINK_LOSS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_RESIDUAL_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_AVAILABLE_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV223_UTILIZED_BANDWIDTH": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV235_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV235_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV236_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV236_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV237_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_TAG64": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_SID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_PREFIX_FLAGS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV4_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV237_IPV6_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_TLV242_SUBTLVS_TYPE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_CAPABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:TLV242_SR_ALGORITHM": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="subtlv-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
def _get_a_bit(self):
        """
        Getter method for a_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/a_bit (boolean)

        YANG Description: The A bit is set when the measured value of this parameter
        exceeds its configured maximum threshold. The A bit is cleared
        when the measured value falls below its configured reuse threshold.
        """
        # Hand back the wrapped YANGDynClass leaf as-is; no copying is needed.
        current_value = self.__a_bit
        return current_value
def _set_a_bit(self, v, load=False):
        """
        Setter method for a_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/a_bit (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_a_bit is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_a_bit() directly.
        YANG Description: The A bit is set when the measured value of this parameter
        exceeds its configured maximum threshold. The A bit is cleared
        when the measured value falls below its configured reuse threshold.
        """
        # Values that expose a _utype hook (pyangbind union types — TODO confirm)
        # are normalized through it before validation.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the incoming value as a typed leaf; construction performs
            # the boolean type validation and raises on incompatible input.
            t = YANGDynClass(
                v,
                base=YANGBool,
                is_leaf=True,
                yang_name="a-bit",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="boolean",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Surface a structured error describing the expected YANG type.
            raise ValueError(
                {
                    "error-string": """a_bit must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="a-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
                }
            )
        self.__a_bit = t
        # NOTE(review): _set() is defined elsewhere; presumably it propagates
        # the change to the parent container — confirm before relying on it.
        if hasattr(self, "_set"):
            self._set()
def _unset_a_bit(self):
        """Reset a_bit to a fresh, default-valued YANGDynClass leaf, discarding any previously set value."""
        self.__a_bit = YANGDynClass(
            base=YANGBool,
            is_leaf=True,
            yang_name="a-bit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )
def _get_delay(self):
        """
        Getter method for delay, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/delay (uint32)

        YANG Description: Average link delay value (in microseconds) between two directly
        connected IS-IS neighbors over a configurable interval.
        """
        # Hand back the wrapped YANGDynClass leaf as-is; no copying is needed.
        current_value = self.__delay
        return current_value
def _set_delay(self, v, load=False):
        """
        Setter method for delay, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state/delay (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_delay is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_delay() directly.
        YANG Description: Average link delay value (in microseconds) between two directly
        connected IS-IS neighbors over a configurable interval.
        """
        # Values that expose a _utype hook (pyangbind union types — TODO confirm)
        # are normalized through it before validation.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the incoming value as a typed leaf; the restricted class
            # enforces the uint32 range 0..4294967295 and raises on violation.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                is_leaf=True,
                yang_name="delay",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint32",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Surface a structured error describing the expected YANG type.
            raise ValueError(
                {
                    "error-string": """delay must be of a type compatible with uint32""",
                    "defined-type": "uint32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
                }
            )
        self.__delay = t
        # NOTE(review): _set() is defined elsewhere; presumably it propagates
        # the change to the parent container — confirm before relying on it.
        if hasattr(self, "_set"):
            self._set()
def _unset_delay(self):
        """Reset delay to a fresh, default-valued YANGDynClass leaf, discarding any previously set value."""
        self.__delay = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="delay",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=False,
        )
# Getter-only properties: these leaves are state data (is_config=False in the
# YANGDynClass construction above), so no public setters are exposed.
subtlv_type = __builtin__.property(_get_subtlv_type)
a_bit = __builtin__.property(_get_a_bit)
delay = __builtin__.property(_get_delay)
# Ordered registry of this container's YANG elements, in schema order.
_pyangbind_elements = OrderedDict(
    [("subtlv_type", subtlv_type), ("a_bit", a_bit), ("delay", delay)]
)
| 67.949553
| 42,480
| 0.489903
|
acfd7719399c86cfe4ae0c3228226eff656e54f8
| 6,196
|
py
|
Python
|
core/minecraft/hypixel/request.py
|
vcokltfre/Myaer
|
8e2a57f26635781e19716b47028f465617defa75
|
[
"MIT"
] | null | null | null |
core/minecraft/hypixel/request.py
|
vcokltfre/Myaer
|
8e2a57f26635781e19716b47028f465617defa75
|
[
"MIT"
] | null | null | null |
core/minecraft/hypixel/request.py
|
vcokltfre/Myaer
|
8e2a57f26635781e19716b47028f465617defa75
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020 MyerFire
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import aiohttp
from ratelimit import limits
import core.config.config
import core.minecraft.request
HYPIXEL_API = "https://api.hypixel.net/"
CONNOR_LINFOOT_API = "https://api.connorlinfoot.com/"
@limits(calls=100, period=60)  # hypixel ratelimit is 120/min, this is to be safe
async def get_player(player: str) -> dict:
    """Fetch a Hypixel player object by Minecraft username.

    The username is first resolved to a UUID via the Mojang API, because
    &name= is deprecated for the Hypixel API.

    Raises:
        NameError: if the player does not exist, or the API reports failure.
    """
    # &name= is deprecated for the Hypixel API, so convert name to UUID with Mojang API
    uuid = (await core.minecraft.request.get_profile(player))["uuid"]
    async with aiohttp.ClientSession() as session:
        # Use the response as a context manager so its connection is released.
        async with session.get(
            f"{HYPIXEL_API}player?key={core.config.config.hypixel_api_key}&uuid={uuid}"
        ) as raw:
            player_json = await raw.json()
    if player_json["success"] and player_json["player"]:
        return player_json
    if player_json["success"]:
        # Hypixel API still returns "success" even if the player does not
        # exist ("player" is null), hence the more complicated check.
        raise NameError(f"Player \"{player}\" does not exist!")
    # success == false: raise instead of silently returning None (bug fix).
    raise NameError(player_json.get("cause", "Something went wrong."))
@limits(calls=100, period=60)  # hypixel ratelimit is 120/min, this is to be safe
async def get_player_uuid(uuid: str) -> dict:
    """Fetch a Hypixel player object by Minecraft UUID (dashed or undashed).

    Raises:
        NameError: if the player does not exist, or the API reports failure.
    """
    async with aiohttp.ClientSession() as session:
        # Use the response as a context manager so its connection is released.
        async with session.get(
            f"{HYPIXEL_API}player?key={core.config.config.hypixel_api_key}&uuid={uuid.replace('-', '')}"
        ) as raw:
            player_json = await raw.json()
    if player_json["success"] and player_json["player"]:
        return player_json
    if player_json["success"]:
        # Hypixel API still returns "success" even if the player does not
        # exist ("player" is null), hence the more complicated check.
        raise NameError(f"Player \"{uuid}\" does not exist!")
    # success == false: raise instead of silently returning None (bug fix).
    raise NameError(player_json.get("cause", "Something went wrong."))
@limits(calls=100, period=60)  # hypixel ratelimit is 120/min, this is to be safe
async def get_leaderboards() -> dict:
    """Fetch the Hypixel leaderboards.

    Raises:
        NameError: if the API reports failure. The only reason there could be
            an error in retrieving leaderboard data is if the API key is
            invalid, but that should not be possible. TL;DR: if anything gets
            here, something went horribly wrong.
    """
    async with aiohttp.ClientSession() as session:
        # Use the response as a context manager so its connection is released.
        async with session.get(
            f"{HYPIXEL_API}leaderboards?key={core.config.config.hypixel_api_key}"
        ) as raw:
            leaderboards_json = await raw.json()
    if leaderboards_json["success"]:
        return leaderboards_json
    # Bug fix: the original *returned* the NameError instead of raising it.
    raise NameError("Something went wrong.")
@limits(calls=100, period=60)  # hypixel ratelimit is 120/min, this is to be safe
async def get_guild_by_uuid(uuid: str) -> dict:
    """Fetch the guild that the player identified by *uuid* belongs to.

    Raises:
        NameError: if the player is not in a guild, or the API reports failure.
    """
    async with aiohttp.ClientSession() as session:
        # Use the response as a context manager so its connection is released.
        async with session.get(
            f"{HYPIXEL_API}guild?key={core.config.config.hypixel_api_key}&player={uuid}"
        ) as raw:
            player_guild_json = await raw.json()
    if player_guild_json["success"] and player_guild_json["guild"]:
        return player_guild_json
    if player_guild_json["success"]:
        # success == true with a null "guild" means the player has no guild.
        raise NameError(f"Player \"{uuid}\" is not in a guild")
    # success == false: raise instead of silently returning None (bug fix).
    raise NameError(player_guild_json.get("cause", "Something went wrong."))
@limits(calls=100, period=60)  # hypixel ratelimit is 120/min, this is to be safe
async def get_guild_by_name(guild: str) -> dict:
    """Fetch a guild by its guild name.

    Raises:
        NameError: if the guild does not exist, or the API reports failure.
    """
    async with aiohttp.ClientSession() as session:
        # Use the response as a context manager so its connection is released.
        async with session.get(
            f"{HYPIXEL_API}guild?key={core.config.config.hypixel_api_key}&name={guild}"
        ) as raw:
            player_guild_json = await raw.json()
    if player_guild_json["success"] and player_guild_json["guild"]:
        return player_guild_json
    if player_guild_json["success"]:
        # Bug fix: this branch previously referenced an undefined `uuid`
        # variable (copy-paste from the UUID lookup), which raised an
        # unrelated NameError. Report the missing guild by name instead.
        raise NameError(f"Guild \"{guild}\" does not exist!")
    # success == false: raise instead of silently returning None (bug fix).
    raise NameError(player_guild_json.get("cause", "Something went wrong."))
@limits(calls=100, period=60)  # hypixel ratelimit is 120/min, this is to be safe
async def get_friends_by_uuid(uuid: str) -> dict:
    """Fetch the friends list of the player identified by *uuid*.

    Raises:
        NameError: if the API returns a null "records" field for the player.
    """
    url = f"{HYPIXEL_API}friends?key={core.config.config.hypixel_api_key}&uuid={uuid}"
    async with aiohttp.ClientSession() as session:
        response = await session.get(url)
        player_friends_json = await response.json()
    if player_friends_json["success"]:
        records = player_friends_json["records"]
        if records:
            return player_friends_json
        if records is None:
            raise NameError(f"Player \"{uuid}\" does not exist!")
@limits(calls=100, period=60)  # hypixel ratelimit is 120/min, this is to be safe
async def get_status_by_uuid(uuid: str) -> dict:
    """Fetch the online status/session of the player identified by *uuid*.

    Raises:
        NameError: with the API-provided cause when the request fails.
    """
    async with aiohttp.ClientSession() as session:
        response = await session.get(
            f"{HYPIXEL_API}status?key={core.config.config.hypixel_api_key}&uuid={uuid}"
        )
        player_status_json = await response.json()
    if not player_status_json["success"]:
        raise NameError(player_status_json["cause"])
    if player_status_json["session"]:
        return player_status_json
async def get_games_connor_linfoot() -> dict:
    """Fetch the Hypixel games listing from the Connor Linfoot API."""
    async with aiohttp.ClientSession() as session:
        response = await session.get(f"{CONNOR_LINFOOT_API}v2/games/hypixel/")
        return await response.json()
| 49.174603
| 118
| 0.705455
|
acfd77e69cfbcd4012356b9999566b7227e10673
| 40,117
|
py
|
Python
|
codalab/lib/worksheet_util.py
|
millerjohnp/codalab-worksheets
|
d6fc37864e7a8966380fc9d73865b10e434d6678
|
[
"Apache-2.0"
] | null | null | null |
codalab/lib/worksheet_util.py
|
millerjohnp/codalab-worksheets
|
d6fc37864e7a8966380fc9d73865b10e434d6678
|
[
"Apache-2.0"
] | null | null | null |
codalab/lib/worksheet_util.py
|
millerjohnp/codalab-worksheets
|
d6fc37864e7a8966380fc9d73865b10e434d6678
|
[
"Apache-2.0"
] | null | null | null |
"""
worksheet_util contains the following public functions:
- request_lines: pops up an editor to allow for full-text editing of a worksheet.
- parse_worksheet_form: takes those lines and generates a set of items (triples)
- interpret_items: takes those triples and returns a structure that interprets all the directives in the worksheet item.
A worksheet contains a list of (worksheet) items, where each item includes
- bundle_uuid (only used if type == bundle)
- subworksheet_uuid (only used if type == worksheet)
- value (used for text and directive)
- type (one of the following)
 * markup: just plain text (markdown)
* directive: special instructions for determining formatting
* bundle: represents a bundle
* worksheet: represents a worksheet
This is the representation in the DB.
In the code, we have full items of the form (bundle_info, subworksheet_info, value_obj, type).
In other words, there are two representations of worksheet items:
- (bundle_uuid, subworksheet_uuid, value, type) [inserted into the database]
- (bundle_info, subworksheet_info, value_obj, type) [used in the code]
A genpath (generalized path) is either:
- a bundle field (e.g., 'command')
- a metadata field (e.g., 'name')
- a path (starts with '/'), but can descend into a YAML file (e.g., /stats:train/errorRate)
See get_worksheet_lines for documentation on the specification of the directives.
"""
import copy
import os
import re
import sys
from codalab.common import PermissionError, UsageError
from codalab.lib import canonicalize, editor_util, formatting
from codalab.objects.permission import group_permissions_str, permission_str
from codalab.rest.worksheet_block_schemas import (
FetchStatusSchema,
BlockModes,
MarkupBlockSchema,
BundleContentsBlockSchema,
BundleImageBlockSchema,
TableBlockSchema,
RecordsRowSchema,
RecordsBlockSchema,
GraphBlockSchema,
SubworksheetsBlock,
BundleUUIDSpecSchema,
)
# Note: this is part of the client's session, not server side.
CURRENT_WORKSHEET = '.'

# Types of (raw) worksheet items
TYPE_MARKUP = 'markup'
TYPE_DIRECTIVE = 'directive'
TYPE_BUNDLE = 'bundle'
TYPE_WORKSHEET = 'worksheet'
WORKSHEET_ITEM_TYPES = (TYPE_MARKUP, TYPE_DIRECTIVE, TYPE_BUNDLE, TYPE_WORKSHEET)

# Raw strings (bug fix): the patterns are unchanged, but non-raw literals like
# '\s' and '\{' are invalid escape sequences that warn on modern Python.
# Matches a bundle reference line: `[description]{bundle_spec}`.
BUNDLE_REGEX = re.compile(r'^(\[(.*)\])?\s*\{([^{]*)\}$')
# Matches a subworksheet reference line: `[description]{{worksheet_spec}}`.
SUBWORKSHEET_REGEX = re.compile(r'^(\[(.*)\])?\s*\{\{(.*)\}\}$')
DIRECTIVE_CHAR = '%'
# A directive line is DIRECTIVE_CHAR followed by optional whitespace and the body.
DIRECTIVE_REGEX = re.compile(r'^' + DIRECTIVE_CHAR + r'\s*(.*)$')

# Default number of lines to pull for each display mode.
DEFAULT_CONTENTS_MAX_LINES = 10
def markup_item(x):
    """Build the raw worksheet item tuple for a markup string."""
    return None, None, x, TYPE_MARKUP
def directive_item(x):
    """Build the raw worksheet item tuple for a directive token list."""
    return None, None, x, TYPE_DIRECTIVE
def bundle_item(x):
    """Build the raw worksheet item tuple for a bundle info dict."""
    # TODO: replace '' with None when tables.py schema is updated
    return x, None, '', TYPE_BUNDLE
def subworksheet_item(x):
    """Build the raw worksheet item tuple for a subworksheet info dict."""
    # TODO: replace '' with None when tables.py schema is updated
    return None, x, '', TYPE_WORKSHEET
def bundle_line(description, uuid):
    """Render a bundle reference source line: ``[description]{uuid}``."""
    return '[' + str(description) + ']{' + str(uuid) + '}'
def worksheet_line(description, uuid):
    """Render a subworksheet reference source line: ``[description]{{uuid}}``."""
    return '[' + str(description) + ']{{' + str(uuid) + '}}'
############################################################
def get_worksheet_info_edit_command(raw_command_map):
    """
    Return a cli-command for editing worksheet-info. Return None if raw_command_map contents are invalid.

    :param raw_command_map: a map containing the field to edit ('k'), the new
        value ('v'), and the action to perform ('action').
    :return: the `wedit` command string, or None for invalid input.
    """
    if raw_command_map.get('action') != 'worksheet-edit':
        return None
    if not raw_command_map.get('k'):
        return None
    if raw_command_map.get('v') is None:
        return None
    return 'wedit -{k[0]} "{v}"'.format(**raw_command_map)
def convert_item_to_db(item):
    """
    Convert an in-memory worksheet item (with full info dicts) into the
    4-tuple stored in the database: (bundle_uuid, subworksheet_uuid, value, type).
    """
    bundle_info, subworksheet_info, value_obj, item_type = item
    bundle_uuid = bundle_info['uuid'] if bundle_info else None
    subworksheet_uuid = subworksheet_info['uuid'] if subworksheet_info else None
    if item_type == TYPE_DIRECTIVE:
        # Directives are stored as a single string, not a token list.
        value = formatting.tokens_to_string(value_obj)
    else:
        value = value_obj
    # TODO: change tables.py so that None's are allowed
    return (bundle_uuid, subworksheet_uuid, value or '', item_type)
def get_worksheet_lines(worksheet_info):
    """
    Generator that returns pretty-printed lines of text for the given worksheet.

    :param worksheet_info: dict whose 'items' entry is a list of raw worksheet
        item tuples (bundle_info, subworksheet_info, value_obj, item_type).
    :return: list of source lines suitable for re-editing the worksheet.
    :raises RuntimeError: if an item has an unknown type.
    """
    lines = []
    for item in worksheet_info['items']:
        (bundle_info, subworksheet_info, value_obj, item_type) = item
        if item_type == TYPE_MARKUP:
            lines.append(value_obj)
        elif item_type == TYPE_DIRECTIVE:
            if len(value_obj) > 0 and value_obj[0] == DIRECTIVE_CHAR:
                # A comment directive
                lines.append('//' + ' '.join(value_obj[1:]))
            else:
                # A normal directive
                value = formatting.tokens_to_string(value_obj)
                value = (
                    DIRECTIVE_CHAR
                    + ('' if len(value) == 0 or value.startswith(DIRECTIVE_CHAR) else ' ')
                    + value
                )
                lines.append(value)
        elif item_type == TYPE_BUNDLE:
            if 'metadata' not in bundle_info:
                # This happens when we add bundles by uuid and don't actually make sure they exist
                description = formatting.contents_str(None)
            else:
                metadata = bundle_info['metadata']
                description = bundle_info['bundle_type']
                description += ' ' + metadata['name']
                deps = interpret_genpath(bundle_info, 'dependencies')
                if deps:
                    description += ' -- ' + deps
                command = bundle_info.get('command')
                if command:
                    description += ' : ' + command
            lines.append(bundle_line(description, bundle_info['uuid']))
        elif item_type == TYPE_WORKSHEET:
            lines.append(
                worksheet_line(
                    'worksheet ' + formatting.contents_str(subworksheet_info.get('name')),
                    subworksheet_info['uuid'],
                )
            )
        else:
            # BUG FIX: previously interpolated the builtin `type` instead of the
            # offending item_type, yielding "<class 'type'>" in the message.
            raise RuntimeError('Invalid worksheet item type: %s' % item_type)
    return lines
def get_formatted_metadata(cls, metadata, raw=False):
    """
    Format a bundle's metadata according to its class's METADATA_SPECS.

    :param cls: bundle subclass (e.g. DatasetBundle, RunBundle, ProgramBundle)
    :param metadata: bundle metadata dict
    :param raw: boolean value indicating if the raw value needs to be returned
    :return: a list of (key, value) tuples for the metadata.
    """
    result = []
    for spec in cls.METADATA_SPECS:
        key = spec.key
        if raw:
            # Raw mode: emit every spec's value, unformatted and unfiltered.
            result.append((key, metadata.get(key)))
            continue
        # Formatted mode: skip missing or empty values.
        if key not in metadata:
            continue
        if metadata[key] == '' or metadata[key] == []:
            continue
        value = apply_func(spec.formatting, metadata.get(key))
        if isinstance(value, list):
            value = ' | '.join(value)
        result.append((key, value))
    return result
def get_editable_metadata_fields(cls):
    """
    Return the metadata keys of *cls* that the owner may edit,
    i.e. every spec that is not auto-generated.

    :param cls: bundle subclass (e.g. DatasetBundle, RunBundle, ProgramBundle)
    """
    return [spec.key for spec in cls.METADATA_SPECS if not spec.generated]
def get_metadata_types(cls):
    """
    Return map from key -> type name for the metadata fields in the given bundle class.
    e.g.
        'request_time' -> 'basestring'
        'time' -> 'duration'
        'tags' -> 'list'
    Possible types: 'int', 'float', 'list', 'bool', 'duration',
    'size', 'date', 'basestring'
    Special types like 'duration' are only indicated when client-side
    formatting/serialization is necessary.
    """
    type_by_key = {}
    for spec in cls.METADATA_SPECS:
        if not issubclass(spec.type, str) and spec.formatting:
            # Non-string field with a special client-side formatter.
            type_by_key[spec.key] = spec.formatting
        else:
            type_by_key[spec.key] = spec.type.__name__
    return type_by_key
def request_lines(worksheet_info):
    """
    Pop up an editor populated with the current worksheet contents and return
    the list of lines (stripped of trailing newlines) that the user typed.

    :param worksheet_info: worksheet info dict used to build the template.
    :raises UsageError: if the user made no change.
    """
    # Construct a form template with the current value of the worksheet.
    template_lines = get_worksheet_lines(worksheet_info)
    template = ''.join('%s%s' % (line, os.linesep) for line in template_lines)
    lines = editor_util.open_and_edit(suffix='.md', template=template)
    # Process the result
    edited_lines = [line.rstrip('\n') for line in lines]
    if edited_lines == template_lines:
        raise UsageError('No change made; aborting')
    return edited_lines
def parse_worksheet_form(form_result, model, user, worksheet_uuid):
    """
    Input: form_result is a list of lines.
    Return (list of (bundle_info, subworksheet_info, value, type) tuples, commands to execute)

    Bundle specs are resolved to uuids in a single batched call; subworksheet
    specs are resolved one at a time, and a spec that fails to resolve is kept
    in the worksheet as a markup line describing the error.
    """

    def get_line_type(line):
        # Classify a raw source line against the item syntaxes.
        if line.startswith('//'):
            return 'comment'
        elif BUNDLE_REGEX.match(line) is not None:
            return TYPE_BUNDLE
        elif SUBWORKSHEET_REGEX.match(line) is not None:
            return TYPE_WORKSHEET
        elif DIRECTIVE_REGEX.match(line) is not None:
            return TYPE_DIRECTIVE
        else:
            return TYPE_MARKUP

    line_types = [get_line_type(line) for line in form_result]
    # Extract bundle specs and resolve uuids in one batch
    bundle_lines = [
        (i, BUNDLE_REGEX.match(line).group(3))
        for i, line in enumerate(form_result)
        if line_types[i] == TYPE_BUNDLE
    ]
    # bundle_specs = (line_indices, bundle_specs)
    bundle_specs = list(zip(*bundle_lines)) if len(bundle_lines) > 0 else [(), ()]
    # bundle_uuids = {line_i: bundle_uuid, ...}
    bundle_uuids = dict(
        list(
            zip(
                bundle_specs[0],
                canonicalize.get_bundle_uuids(model, user, worksheet_uuid, bundle_specs[1]),
            )
        )
    )
    items = []
    for line_i, (line_type, line) in enumerate(zip(line_types, form_result)):
        if line_type == 'comment':
            # '//'-comments are stored as a directive whose first token is '%'.
            comment = line[2:]
            items.append(directive_item([DIRECTIVE_CHAR, comment]))
        elif line_type == TYPE_BUNDLE:
            bundle_info = {
                'uuid': bundle_uuids[line_i]
            }  # info doesn't need anything other than uuid
            items.append(bundle_item(bundle_info))
        elif line_type == TYPE_WORKSHEET:
            subworksheet_spec = SUBWORKSHEET_REGEX.match(line).group(3)
            try:
                subworksheet_uuid = canonicalize.get_worksheet_uuid(
                    model, user, worksheet_uuid, subworksheet_spec
                )
                subworksheet_info = {
                    'uuid': subworksheet_uuid
                }  # info doesn't need anything other than uuid
                items.append(subworksheet_item(subworksheet_info))
            except UsageError as e:
                # Keep the unresolvable line visible in the worksheet as markup.
                items.append(markup_item(str(e) + ': ' + line))
        elif line_type == TYPE_DIRECTIVE:
            directive = DIRECTIVE_REGEX.match(line).group(1)
            items.append(directive_item(formatting.string_to_tokens(directive)))
        elif line_type == TYPE_MARKUP:
            items.append(markup_item(line))
        else:
            raise RuntimeError("Invalid line type %s: this should not happen." % line_type)
    return items
def is_file_genpath(genpath):
    """
    Determine whether the genpath is a file (e.g., '/stdout') or not (e.g., 'command').

    File genpaths are exactly those beginning with '/'.
    :param genpath: a generalized path
    :return: a boolean value indicating if the genpath is a file.
    """
    return genpath[:1] == '/'
def interpret_genpath(bundle_info, genpath, db_model=None, owner_cache=None):
    """
    Quickly interpret the genpaths (generalized path) that only require looking
    bundle_info (e.g., 'time', 'command'). The interpretation of generalized
    paths that require reading files is done by interpret_file_genpath.
    If genpath is referring to a file, then just returns instructions for fetching that file rather than actually doing it.
    :param bundle_info: dictionary which contains metadata of current bundle's information, e.g. uuid, bundle_type, owner_id, etc.
    :param genpath: a generalized path, e.g. column names(summary, owner, etc.), args.
    :param db_model (optional): database model which is used to query database
    :param owner_cache (optional): a dictionary stores mappings from owner_id to owner
    :return: the interpretation of genpath
    """
    # File genpaths are deferred: return a (uuid, genpath) target to be fetched later.
    if is_file_genpath(genpath):
        return (bundle_info['uuid'], genpath)
    # Render dependencies
    deps = bundle_info.get('dependencies', [])
    anonymous = len(deps) == 1 and deps[0]['child_path'] == ''

    def render_dep(dep, show_key=True, show_uuid=False):
        # Render one dependency roughly as [child_path:]parent[/parent_path].
        if show_key and not anonymous:
            if show_uuid or dep['child_path'] != dep['parent_name']:
                a = dep['child_path'] + ':'
            else:
                a = ':'
        else:
            a = ''
        b = dep['parent_uuid'] if show_uuid else (dep['parent_name'] or '')
        c = '/' + dep['parent_path'] if dep['parent_path'] else ''
        return a + b + c

    # Special genpaths (dependencies, args)
    if genpath == 'dependencies':
        return ','.join([render_dep(dep) for dep in deps])
    elif genpath.startswith('dependencies/'):
        # Look up the particular dependency
        _, name = genpath.split('/', 1)
        for dep in deps:
            if dep['child_path'] == name:
                return render_dep(dep, show_key=False)
        return formatting.verbose_contents_str(None)
    elif genpath == 'args':
        # Arguments that we would pass to 'cl'
        args = []
        bundle_type = bundle_info.get('bundle_type')
        if bundle_type not in ('make', 'run'):
            return None
        args += [bundle_type]
        # Dependencies
        for dep in deps:
            args.append(render_dep(dep, show_uuid=True))
        # Command
        if bundle_info['command']:
            args.append(formatting.quote(bundle_info['command']))
        # Add request arguments from metadata
        metadata = bundle_info['metadata']
        for key, value in metadata.items():
            if key.startswith('request_') and value:
                key = key.replace('_', '-')
                if isinstance(value, bool):
                    # Boolean requests render as a bare flag.
                    args.append('--' + key)
                else:
                    args.extend(['--' + key, formatting.quote(str(value))])
        return ' '.join(args)
    elif genpath == 'summary':

        def friendly_render_dep(dep):
            # Short form: key{parent_name:uuid-prefix}
            key = dep['child_path'] or dep['parent_name']
            friendly_parent_name = formatting.verbose_contents_str(dep['parent_name'])
            value = (
                key
                + '{'
                + (friendly_parent_name + ':' if key != dep['parent_name'] else '')
                + dep['parent_uuid'][0:4]
                + '}'
            )
            return key, value

        # Nice easy-to-ready description of how this bundle got created.
        bundle_type = bundle_info.get('bundle_type')
        if bundle_type in ('dataset', 'program'):
            return '[uploaded]'
        if bundle_type == 'make':
            args = []
            for dep in deps:
                args.append(friendly_render_dep(dep)[1])
            return '= ' + ' '.join(args)
        elif bundle_type == 'run':
            return '! ' + bundle_info['command']
    elif genpath == 'host_worksheets':
        if 'host_worksheets' in bundle_info:
            return ' '.join(
                '%s(%s)' % (info['name'], info['uuid']) for info in bundle_info['host_worksheets']
            )
    elif genpath == 'permission':
        if 'permission' in bundle_info:
            return permission_str(bundle_info['permission'])
    elif genpath == 'group_permissions':
        if 'group_permissions' in bundle_info:
            # FIXME(sckoo): we will be passing the old permissions format into this
            # which has been updated to accommodate the new formatting
            return group_permissions_str(bundle_info['group_permissions'])
    elif genpath == 'owner':
        if 'owner_id' in bundle_info:
            if owner_cache is not None and bundle_info['owner_id'] in owner_cache:
                return owner_cache[bundle_info['owner_id']]
            else:
                # We might batch this database operation in the future
                # NOTE(review): assumes callers pass both db_model and owner_cache
                # together — crashes if owner_cache is None here; confirm callers.
                owner = db_model.get_user(user_id=bundle_info['owner_id'])
                owner_cache[bundle_info['owner_id']] = owner.user_name
                return owner.user_name
    # Bundle field?
    value = bundle_info.get(genpath)
    if value is not None:
        return value
    # Metadata field?
    value = bundle_info.get('metadata', {}).get(genpath)
    if value is not None:
        return value
    return None
def format_metadata(metadata):
    """
    Format worksheet item metadata in place, applying the post-processing
    functions declared in the default schema.
    """
    if not metadata:
        return
    for _, name, func in get_default_schemas()['default']:
        if func and metadata.get(name):
            metadata[name] = apply_func(func, metadata[name])
def canonicalize_schema_item(args):
    """
    Users who type in schema items can specify a partial argument list.
    Return the canonicalized version: a (name, genpath, post-processing) triple.

    :raises UsageError: if args has zero or more than three elements.
    """
    if len(args) == 1:  # genpath only: derive the name from its last component
        return (os.path.basename(args[0]).split(":")[-1], args[0], None)
    if len(args) == 2:  # name genpath
        return (args[0], args[1], None)
    if len(args) == 3:  # name genpath post-processing
        return (args[0], args[1], args[2])
    raise UsageError('Invalid number of arguments: %s' % (args,))
def canonicalize_schema_items(items):
    """Canonicalize every schema item in *items* (see canonicalize_schema_item)."""
    return list(map(canonicalize_schema_item, items))
def apply_func(func, arg):
    """
    Apply post-processing function |func| to |arg|.
    |func| is a string representing a list of functions (which are to be
    applied to |arg| in succession). Each function is either:
    - 'duration', 'date', 'size' for special formatting
    - '%...' for sprintf-style formatting
    - s/.../... for regular expression substitution
    - [a:b] for taking substrings
    Formatting is best-effort: if application fails, |arg| is returned unchanged.
    """
    FUNC_DELIM = ' | '
    if isinstance(arg, tuple):
        # tuples are (bundle_uuid, genpath) which have not been fleshed out
        return arg + (func,)
    try:
        if func is None:
            return arg
        # String encoding of a function: size s/a/b
        for f in func.split(FUNC_DELIM):
            if f == 'str':
                arg = str(arg)
            elif f == 'date':
                arg = formatting.date_str(float(arg)) if arg is not None else None
            elif f == 'duration':
                arg = formatting.duration_str(float(arg)) if arg is not None else None
            elif f == 'size':
                arg = formatting.size_str(float(arg)) if arg is not None else None
            elif f.startswith('%'):
                arg = (f % float(arg)) if arg is not None else None
            elif f.startswith('s/'):  # regular expression: s/<old string>/<new string>
                esc_slash = '_ESC_SLASH_'  # Assume this doesn't occur in s
                # Preserve escaped characters: \/
                tokens = f.replace('\\/', esc_slash).split('/')
                if len(tokens) != 3:
                    return '<invalid regex: %s>' % f
                s = tokens[1].replace(esc_slash, '/')
                t = tokens[2].replace(esc_slash, '/')
                arg = re.sub(s, t, arg)
            elif f.startswith('['):  # substring
                # Raw string: the pattern contains \[ and \] escapes.
                m = re.match(r'\[(.*):(.*)\]', f)
                if m:
                    start = int(m.group(1) or 0)
                    end = int(m.group(2) or len(arg))
                    arg = arg[start:end]
                else:
                    return '<invalid function: %s>' % f
            elif f.startswith('add '):
                # 'add k v' checks if arg is a dictionary and updates it with arg[k] = v
                if isinstance(arg, dict):
                    k, v = f.split(' ')[1:]
                    arg[k] = v
                else:
                    return 'arg (%s) not a dictionary' % type(arg)
            elif f.startswith('key '):
                # 'key k' converts arg into a dictionary where arg[k] = arg
                arg = {f.split(' ')[1]: arg}
            else:
                return '<invalid function: %s>' % f
        return arg
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Applying the function failed, so just return the arg.
        return arg
def get_default_schemas():
    """
    Return the built-in schemas: a map from schema name to a list of
    canonicalized schema items (see canonicalize_schema_item).
    """
    # Single fields, each as [name] or [name, genpath, post-processing]
    uuid = ['uuid[0:8]', 'uuid', '[0:8]']
    name = ['name']
    summary = ['summary']
    data_size = ['data_size', 'data_size', 'size']
    time = ['time', 'time', 'duration']
    state = ['state']
    description = ['description']
    created = ['created', 'created', 'date']
    schemas = {
        # Schemas corresponding to one field
        'uuid': [uuid],
        'name': [name],
        'summary': [summary],
        'data_size': [data_size],
        'time': [time],
        'state': [state],
        'description': [description],
        'created': [created],
        # Schemas involving multiple fields
        'default': [uuid, name, summary, data_size, state, description],
        'program': [uuid, name, data_size, description],
        'dataset': [uuid, name, data_size, description],
        'make': [uuid, name, summary, data_size, state, description],
        'run': [uuid, name, summary, data_size, time, state, description],
    }
    return {key: canonicalize_schema_items(items) for key, items in schemas.items()}
def get_command(value_obj):  # For directives only
    """Return the directive's command (its first token), or None if empty."""
    if not value_obj:
        return None
    return value_obj[0]
def interpret_items(schemas, raw_items, db_model=None):
    """
    Interpret different items based on their types.
    :param schemas: initial mapping from name to list of schema items (columns of a table)
    :param raw_items: list of (raw) worksheet items (triples) to interpret
    :param db_model: database model which is used to query database
    :return: {'items': interpreted_items, ...}, where interpreted_items is a list of:
    {
        'mode': display mode ('markup' | 'contents' | 'image' | 'html', etc.)
        'interpreted': one of
            - rendered string
            - target = (bundle_uuid, genpath)
            - (header = (col1, ..., coln), rows = [{col1:value1, ..., coln:valuen}, ...]) [for tables]
            - {keywords: [...]} for mode = 'search' or 'wsearch'
        'properties': dict of properties (e.g., width, maxlines, etc.),
        'bundle_info': bundle_info or list of bundle_infos,
        'subworksheet_info': subworksheet,
    }
    In addition, return an alignment between the raw items and the interpreted items.
    Each interpreted item has a focusIndex, and possibly consists of a list of
    table rows (indexed by subFocusIndex). Here is an example:
      --- Raw ---                   --- Interpreted ---
      rawIndex                                         (focusIndex, subFocusIndex)
      0        % display table
      1        [bundle]             [table - row 0     (0, 0)
      2        [bundle]                    - row 1]    (0, 1)
      3
      4        hello                [markup            (1, 0)
      5        world                ]
      6        [worksheet]          [worksheet]        (2, 0)
      7
    The mapping should be computed as follows:
    - Some raw items contribute directly to a particular interpreted item.
    - Others (blank lines, directives, schema definitions) don't.
    - Those that don't should get mapped to the next interpreted item.
    """
    raw_to_block = []  # rawIndex => (focusIndex, subFocusIndex)
    # Set default schema
    current_schema = None
    default_display = ('table', 'default')
    current_display = default_display
    blocks = []
    # Pending bundles/worksheets accumulated until the next flush.
    bundle_infos = []
    worksheet_infos = []

    def get_schema(args):  # args is a list of schema names
        args = args if len(args) > 0 else ['default']
        schema = []
        for arg in args:
            # If schema doesn't exist, then treat as item (e.g., uuid).
            schema += schemas.get(arg, canonicalize_schema_items([arg.split(':', 2)]))
        return schema

    def is_missing(info):
        # A bundle we could not fetch has no 'metadata'.
        return 'metadata' not in info

    def parse_properties(args):
        # Parse trailing `key=value` display arguments into a dict.
        properties = {}
        for item in args:
            if '=' not in item:
                raise UsageError('Expected <key>=<value>, but got %s' % item)
            key, value = item.split('=', 1)
            properties[key] = value
        return properties

    def genpath_to_target(bundle_info, genpath):
        # bundle_info, '/stdout' => target = (uuid, 'stdout')
        if not is_file_genpath(genpath):
            raise UsageError('Not file genpath: %s' % genpath)
        # strip off the leading / from genpath to create a subpath in the target.
        return (bundle_info['uuid'], genpath[1:])

    def flush_bundles():
        """
        Having collected bundles in |bundle_infos|, flush them into |blocks|,
        potentially as a single table depending on the mode.
        """
        if len(bundle_infos) == 0:
            return

        def raise_genpath_usage_error():
            raise UsageError(
                'Expected \'% display '
                + mode
                + ' (genpath)\', but got \'% display '
                + ' '.join([mode] + args)
                + '\''
            )

        # Print out the curent bundles somehow
        mode = current_display[0]
        args = current_display[1:]
        if mode == 'hidden':
            pass
        elif mode == 'contents' or mode == 'image':
            # One block per bundle, each fetching a file target.
            for item_index, bundle_info in bundle_infos:
                if is_missing(bundle_info):
                    blocks.append(
                        MarkupBlockSchema().load({'text': 'ERROR: cannot access bundle'}).data
                    )
                    continue
                # Parse arguments
                if len(args) == 0:
                    raise_genpath_usage_error()
                # these two are required for the target
                (bundle_uuid, target_genpath) = genpath_to_target(bundle_info, args[0])
                properties = parse_properties(args[1:])
                block_object = {
                    'target_genpath': target_genpath,
                    'bundles_spec': BundleUUIDSpecSchema()
                    .load(BundleUUIDSpecSchema.create_json([bundle_info]))
                    .data,
                    'status': FetchStatusSchema.get_unknown_status(),
                }
                if mode == 'contents':
                    try:
                        block_object['max_lines'] = int(
                            properties.get('maxlines', DEFAULT_CONTENTS_MAX_LINES)
                        )
                    except ValueError:
                        raise UsageError("maxlines must be integer")
                    blocks.append(BundleContentsBlockSchema().load(block_object).data)
                elif mode == 'image':
                    block_object['width'] = properties.get('width', None)
                    block_object['height'] = properties.get('height', None)
                    blocks.append(BundleImageBlockSchema().load(block_object).data)
        elif mode == 'record':
            # display record schema =>
            # key1: value1
            # key2: value2
            # ...
            schema = get_schema(args)
            for item_index, bundle_info in bundle_infos:
                header = ('key', 'value')
                rows = []
                for (name, genpath, post) in schema:
                    rows.append(
                        RecordsRowSchema()
                        .load(
                            {
                                'key': name + ':',
                                'value': apply_func(post, interpret_genpath(bundle_info, genpath)),
                            }
                        )
                        .data
                    )
                blocks.append(
                    RecordsBlockSchema()
                    .load(
                        {
                            'bundles_spec': BundleUUIDSpecSchema()
                            .load(BundleUUIDSpecSchema.create_json([bundle_info]))
                            .data,
                            'status': FetchStatusSchema.get_unknown_status(),
                            'header': header,
                            'rows': rows,
                        }
                    )
                    .data
                )
        elif mode == 'table':
            # display table schema =>
            # key1       key2
            # b1_value1  b1_value2
            # b2_value1  b2_value2
            schema = get_schema(args)
            header = tuple(name for (name, genpath, post) in schema)
            rows = []
            processed_bundle_infos = []
            # Cache the mapping between owner_id to owner on current worksheet
            owner_cache = {}
            for item_index, bundle_info in bundle_infos:
                if 'metadata' in bundle_info:
                    rows.append(
                        {
                            name: apply_func(
                                post,
                                interpret_genpath(
                                    bundle_info, genpath, db_model=db_model, owner_cache=owner_cache
                                ),
                            )
                            for (name, genpath, post) in schema
                        }
                    )
                    processed_bundle_infos.append(copy.deepcopy(bundle_info))
                else:
                    # The front-end relies on the name metadata field existing
                    processed_bundle_info = copy.deepcopy(bundle_info)
                    processed_bundle_info['metadata'] = {'name': '<invalid>'}
                    rows.append(
                        {
                            name: apply_func(
                                post, interpret_genpath(processed_bundle_info, genpath)
                            )
                            for (name, genpath, post) in schema
                        }
                    )
                    processed_bundle_infos.append(processed_bundle_info)
            blocks.append(
                TableBlockSchema()
                .load(
                    {
                        'bundles_spec': BundleUUIDSpecSchema()
                        .load(BundleUUIDSpecSchema.create_json(processed_bundle_infos))
                        .data,
                        'status': FetchStatusSchema.get_unknown_status(),
                        'header': header,
                        'rows': rows,
                    }
                )
                .data
            )
        elif mode == 'graph':
            # display graph <genpath> <properties>
            if len(args) == 0:
                raise_genpath_usage_error()
            # trajectories is list of {
            #   'uuid': ...,
            #   'display_name': ..., # What to show as the description of a bundle
            #   'target': (bundle_uuid, subpath)
            # }
            properties = parse_properties(args[1:])
            trajectories = [
                {
                    'bundle_uuid': bundle_info['uuid'],
                    'display_name': interpret_genpath(
                        bundle_info, properties.get('display_name', 'name')
                    ),
                    'target_genpath': genpath_to_target(bundle_info, args[0])[1],
                }
                for item_index, bundle_info in bundle_infos
            ]
            try:
                max_lines = int(properties.get('maxlines', DEFAULT_CONTENTS_MAX_LINES))
            except ValueError:
                raise UsageError("maxlines must be integer")
            blocks.append(
                GraphBlockSchema()
                .load(
                    {
                        'trajectories': trajectories,
                        'bundles_spec': BundleUUIDSpecSchema()
                        .load(BundleUUIDSpecSchema.create_json([bundle_infos[0][1]]))
                        .data,  # Only show the first one for now
                        # 'bundles_spec': BundleUUIDSpecSchema().load(BundleUUIDSpecSchema.create_json(
                        #     [copy.deepcopy(bundle_info) for item_index, bundle_info in bundle_infos]).data,
                        'max_lines': max_lines,
                        'xlabel': properties.get('xlabel', None),
                        'ylabel': properties.get('ylabel', None),
                    }
                )
                .data
            )
        else:
            raise UsageError('Unknown display mode: %s' % mode)
        bundle_infos[:] = []  # Clear

    def flush_worksheets():
        # Flush any pending subworksheets into a single block.
        if len(worksheet_infos) == 0:
            return
        blocks.append(
            SubworksheetsBlock().load({'subworksheet_infos': copy.deepcopy(worksheet_infos)}).data
        )
        worksheet_infos[:] = []

    # Go through all the raw items...
    last_was_empty_line = False
    for raw_index, item in enumerate(raw_items):
        new_last_was_empty_line = True
        try:
            (bundle_info, subworksheet_info, value_obj, item_type) = item
            is_bundle = item_type == TYPE_BUNDLE
            is_search = item_type == TYPE_DIRECTIVE and get_command(value_obj) == 'search'
            is_directive = item_type == TYPE_DIRECTIVE
            is_worksheet = item_type == TYPE_WORKSHEET
            if not is_bundle:
                flush_bundles()
            if not is_worksheet:
                flush_worksheets()
            # Reset display to minimize long distance dependencies of directives
            if not (is_bundle or is_search):
                current_display = default_display
            # Reset schema to minimize long distance dependencies of directives
            if not is_directive:
                current_schema = None
            if item_type == TYPE_BUNDLE:
                raw_to_block.append((len(blocks), len(bundle_infos)))
                bundle_infos.append((raw_index, bundle_info))
            elif item_type == TYPE_WORKSHEET:
                raw_to_block.append((len(blocks), len(worksheet_infos)))
                worksheet_infos.append(subworksheet_info)
            elif item_type == TYPE_MARKUP:
                new_last_was_empty_line = value_obj == ''
                if (
                    len(blocks) > 0
                    and blocks[-1]['mode'] == BlockModes.markup_block
                    and not last_was_empty_line
                    and not new_last_was_empty_line
                ):
                    # Join with previous markup item
                    blocks[-1]['text'] += '\n' + value_obj
                elif not new_last_was_empty_line:
                    blocks.append(
                        MarkupBlockSchema().load({'id': len(blocks), 'text': value_obj}).data
                    )
                # Important: set raw_to_block after so we can focus on current item.
                if new_last_was_empty_line:
                    raw_to_block.append(None)
                else:
                    raw_to_block.append((len(blocks) - 1, 0))
            elif item_type == TYPE_DIRECTIVE:
                command = get_command(value_obj)
                if command == '%' or command == '' or command is None:
                    # Comment
                    pass
                elif command == 'schema':
                    # Start defining new schema
                    if len(value_obj) < 2:
                        raise UsageError("`schema` missing name")
                    name = value_obj[1]
                    schemas[name] = current_schema = []
                elif command == 'addschema':
                    # Add to schema
                    if current_schema is None:
                        raise UsageError("`addschema` must be preceded by `schema` directive")
                    if len(value_obj) < 2:
                        raise UsageError("`addschema` missing name")
                    name = value_obj[1]
                    current_schema += schemas[name]
                elif command == 'add':
                    # Add to schema
                    if current_schema is None:
                        raise UsageError("`add` must be preceded by `schema` directive")
                    schema_item = canonicalize_schema_item(value_obj[1:])
                    current_schema.append(schema_item)
                elif command == 'display':
                    # Set display
                    current_display = value_obj[1:]
                else:
                    raise UsageError("unknown directive `%s`" % command)
                raw_to_block.append(None)
            else:
                raise RuntimeError('Unknown worksheet item type: %s' % item_type)
            # Flush bundles once more at the end
            if raw_index == len(raw_items) - 1:
                flush_bundles()
                flush_worksheets()
        except UsageError as e:
            # Surface the user's mistake as a markup block and keep going.
            current_schema = None
            bundle_infos[:] = []
            worksheet_infos[:] = []
            blocks.append(
                MarkupBlockSchema()
                .load({'text': 'Error on line %d: %s' % (raw_index, str(e))})
                .data
            )
            raw_to_block.append((len(blocks) - 1, 0))
        except Exception:
            current_schema = None
            bundle_infos[:] = []
            worksheet_infos[:] = []
            import traceback

            traceback.print_exc()
            blocks.append(
                MarkupBlockSchema()
                .load({'text': 'Unexpected error while parsing line %d' % raw_index})
                .data
            )
            raw_to_block.append((len(blocks) - 1, 0))
        finally:
            last_was_empty_line = new_last_was_empty_line
    # TODO: fix inconsistencies resulting from UsageErrors thrown in flush_bundles()
    if len(raw_to_block) != len(raw_items):
        print("WARNING: Length of raw_to_block does not match length of raw_items", file=sys.stderr)
    # Package the result
    block_to_raw = {}
    next_interpreted_index = None
    # Go in reverse order so we can assign raw items that map to None to the next interpreted item
    for raw_index, interpreted_index in reversed(list(enumerate(raw_to_block))):
        if interpreted_index is None:  # e.g., blank line, directive
            interpreted_index = next_interpreted_index
            raw_to_block[raw_index] = interpreted_index
        else:
            interpreted_index_str = str(interpreted_index[0]) + ',' + str(interpreted_index[1])
            if interpreted_index_str not in block_to_raw:  # Bias towards the last item
                block_to_raw[interpreted_index_str] = raw_index
        next_interpreted_index = interpreted_index
    # Return the result
    result = {}
    result['blocks'] = blocks
    result['raw_to_block'] = raw_to_block
    result['block_to_raw'] = block_to_raw
    return result
def check_worksheet_not_frozen(worksheet):
    """Raise PermissionError if *worksheet* is frozen (i.e., immutable)."""
    if not worksheet.frozen:
        return
    raise PermissionError(
        'Cannot mutate frozen worksheet %s(%s).' % (worksheet.uuid, worksheet.name)
    )
| 38.797872
| 130
| 0.558541
|
acfd78d454d509971c277e97d471031eed926f8e
| 2,240
|
py
|
Python
|
configs/lane_detection/resa/resnet18_tusimple.py
|
voldemortX/DeeplabV3_PyTorch1.3_Codebase
|
d22d23e74800fafb58eeb61d6649008745c1a287
|
[
"BSD-3-Clause"
] | 1
|
2020-09-17T06:21:39.000Z
|
2020-09-17T06:21:39.000Z
|
configs/lane_detection/resa/resnet18_tusimple.py
|
voldemortX/pytorch-segmentation
|
9c62c0a721d11c8ea6bf312ecf1c7b238a54dcda
|
[
"BSD-3-Clause"
] | null | null | null |
configs/lane_detection/resa/resnet18_tusimple.py
|
voldemortX/pytorch-segmentation
|
9c62c0a721d11c8ea6bf312ecf1c7b238a54dcda
|
[
"BSD-3-Clause"
] | null | null | null |
from importmagician import import_from
with import_from('./'):
# Data pipeline
from configs.lane_detection.common.datasets.tusimple_seg import dataset
from configs.lane_detection.common.datasets.train_level0_360 import train_augmentation
from configs.lane_detection.common.datasets.test_360 import test_augmentation
# Optimization pipeline
from configs.lane_detection.common.optims.segloss_7class import loss
from configs.lane_detection.common.optims.sgd006 import optimizer
from configs.lane_detection.common.optims.ep50_poly_warmup200 import lr_scheduler
# Training configuration.
train = {
    'exp_name': 'resnet18_resa_tusimple',
    'workers': 4,
    'batch_size': 5,
    'checkpoint': None,
    # Device args
    'world_size': 4,
    'dist_url': 'tcp://localhost:12345',
    'device': 'cuda',
    'val_num_steps': 0,  # Seg IoU validation (mostly useless)
    'save_dir': './checkpoints',
    'input_size': (360, 640),
    'original_size': (720, 1280),
    'num_classes': 7,
    'num_epochs': 50,
    'collate_fn': None,  # 'dict_collate_fn' for LSTR
    'seg': True,  # Seg-based method or not
}

# Testing configuration.
test = {
    'exp_name': 'resnet18_resa_tusimple',
    'workers': 4,
    'batch_size': 20,
    'checkpoint': './checkpoints/resnet18_resa_tusimple/model.pt',
    # Device args
    'device': 'cuda',
    'save_dir': './checkpoints',
    'seg': True,
    'gap': 10,
    'ppl': 56,
    'thresh': 0.3,
    'collate_fn': None,  # 'dict_collate_fn' for LSTR
    'input_size': (360, 640),
    'original_size': (720, 1280),
    'max_lane': 5,
    'dataset_name': 'tusimple',
}

# Model: RESA with a dilated ResNet-18 backbone.
model = {
    'name': 'RESA_Net',
    'backbone_cfg': {
        'name': 'predefined_resnet_backbone',
        'backbone_name': 'resnet18',
        'return_layer': 'layer3',
        'pretrained': True,
        'replace_stride_with_dilation': [False, True, True],
    },
    'reducer_cfg': {
        'name': 'RESAReducer',
        'in_channels': 256,
        'reduce': 128,
    },
    'spatial_conv_cfg': {
        'name': 'RESA',
        'num_channels': 128,
        'iteration': 5,
        'alpha': 2.0,
    },
    'classifier_cfg': {
        'name': 'BUSD',
        'in_channels': 128,
        'num_classes': 7,
    },
    'lane_classifier_cfg': {
        'name': 'EDLaneExist',
        'num_output': 7 - 1,
        'flattened_size': 4400,
        'dropout': 0.1,
        'pool': 'avg',
    },
}
| 25.168539
| 90
| 0.662054
|
acfd790ac4d72a6c7863ffd147db0c2293d79105
| 1,865
|
py
|
Python
|
Link_prediction_model/logger.py
|
amazon-research/gnn-tail-generalization
|
1ff49e62b8a2e2a7273c50dce59167ea9d9161fb
|
[
"Apache-2.0"
] | 15
|
2021-11-03T22:06:16.000Z
|
2022-03-26T06:56:48.000Z
|
Link_prediction_model/logger.py
|
amazon-research/gnn-tail-generalization
|
1ff49e62b8a2e2a7273c50dce59167ea9d9161fb
|
[
"Apache-2.0"
] | 1
|
2022-02-22T16:11:27.000Z
|
2022-03-01T07:09:35.000Z
|
Link_prediction_model/logger.py
|
amazon-research/gnn-tail-generalization
|
1ff49e62b8a2e2a7273c50dce59167ea9d9161fb
|
[
"Apache-2.0"
] | 3
|
2022-01-08T09:58:15.000Z
|
2022-03-21T22:43:16.000Z
|
# -*- coding: utf-8 -*-
import torch
import sys
class Logger(object):
    """Accumulates (valid, test) result pairs per run and prints summaries."""

    def __init__(self, runs, info=None):
        self.info = info
        self.results = [[] for _ in range(runs)]

    def add_result(self, run, result):
        # assert len(result) == 2
        assert 0 <= run < len(self.results)
        self.results[run].append(result)

    @staticmethod
    def _best_epoch(result, last_best):
        # Index of the (last, if last_best) maximum validation score.
        if last_best:
            # get last max value index by reversing result tensor
            return result.size(0) - result[:, 0].flip(dims=[0]).argmax().item() - 1
        return result[:, 0].argmax().item()

    def print_statistics(self, run=None, f=sys.stdout, last_best=False):
        if run is not None:
            # Single-run summary: best validation score and its test score.
            result = 100 * torch.tensor(self.results[run])
            argmax = self._best_epoch(result, last_best)
            print(f'Run {run + 1:02d}:', file=f)
            print(f'Highest Valid: {result[:, 0].max():.2f}', file=f)
            print(f'Highest Eval Point: {argmax + 1}', file=f)
            print(f'  Final Test: {result[argmax, 1]:.2f}', file=f)
        else:
            # Aggregate over all runs: mean/std of each run's best scores.
            result = 100 * torch.tensor(self.results)
            best_results = []
            for r in result:
                argmax = self._best_epoch(r, last_best)
                best_results.append((r[:, 0].max().item(), r[argmax, 1].item()))
            best_result = torch.tensor(best_results)
            print(f'All runs:', file=f)
            r = best_result[:, 0]
            print(f'Highest Valid: {r.mean():.2f} {r.std():.2f}', file=f)
            r = best_result[:, 1]
            print(f'  Final Test: {r.mean():.2f} {r.std():.2f}', file=f)
| 39.680851
| 89
| 0.50134
|
acfd7972c6a709e5d84d86fdba5bd727abb4ce14
| 1,472
|
py
|
Python
|
solutions/19. Remove Nth Node From End of List.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
solutions/19. Remove Nth Node From End of List.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
solutions/19. Remove Nth Node From End of List.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
"""
Runtime: 32 ms, faster than 92.79% of Python3 online submissions for Remove Nth Node From End of List.
Memory Usage: 13.9 MB, less than 60.85% of Python3 online submissions for Remove Nth Node From End of List.
"""
from typing import List
from typing import Optional
class ListNode:
    """Singly linked list node holding a value and a pointer to the next node."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
    """LeetCode 19: remove the n-th node from the end of a singly linked list."""

    def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:
        """Delete the n-th node from the end and return the (possibly new) head.

        Single pass with two pointers kept ``n`` nodes apart: once the leading
        pointer has no successor, the trailing pointer sits just before the
        node to delete. The original code walked the list twice; this keeps the
        same interface and results with one traversal.

        Assumes ``1 <= n <= length`` (the LeetCode contract); returns None for
        an empty list.
        """
        if head is None:
            return None
        # Advance the leading pointer n steps ahead of the head.
        lead = head
        for _ in range(n):
            lead = lead.next
        if lead is None:
            # n equals the list length: the head itself is removed.
            return head.next
        # Move both pointers until `lead` reaches the last node; `trail`
        # then precedes the node to remove.
        trail = head
        while lead.next is not None:
            lead = lead.next
            trail = trail.next
        trail.next = trail.next.next
        return head
def main():
    """Ad-hoc check: delete the 2nd node from the end of 1->2->3->4->5."""
    head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
    result = Solution().removeNthFromEnd(head, 2)
    values = []
    node = result
    while node is not None:
        values.append(node.val)
        node = node.next
    print('Output:', values)
    print('Expected:', [1, 2, 3, 5])
if __name__ == "__main__":
main()
| 28.862745
| 107
| 0.514946
|
acfd799ddb8aeeb5eb7edee4622bf95ba165ebe9
| 4,930
|
py
|
Python
|
post_engine.py
|
shinux/tears
|
4ec3d362692bcf5563ecdb0bc10c83d7f236ecce
|
[
"MIT"
] | 6
|
2016-04-11T02:03:46.000Z
|
2019-08-26T06:12:55.000Z
|
post_engine.py
|
shinux/tears
|
4ec3d362692bcf5563ecdb0bc10c83d7f236ecce
|
[
"MIT"
] | null | null | null |
post_engine.py
|
shinux/tears
|
4ec3d362692bcf5563ecdb0bc10c83d7f236ecce
|
[
"MIT"
] | 1
|
2018-12-28T17:11:59.000Z
|
2018-12-28T17:11:59.000Z
|
import os
import sys
import yaml
import pymongo
from os import listdir
from tears import basedir
# Module-level MongoDB handles: all blog content lives in the local `tears` db.
client = pymongo.MongoClient("localhost", 27017)
db = client.tears
post_collection = db.posts
category_collection = db.categories
tag_collection = db.tags
about_collection = db.about
link_collection = db.link
# All content files are read from <package basedir>/source/.
basedir += '/source/'
def get_all_file(target='posts'):
    """Return the full path of every entry under ``basedir``/*target*."""
    folder = basedir + target
    return [folder + '/' + entry for entry in listdir(folder)]
def generate_url(date, full_file_path):
    """Build a post URL ``/<year>/<month>/<day>/<stem>`` from its date and file path."""
    stem = full_file_path.split('/')[-1].split('.')[0]
    return '/{0}/{1}/{2}/{3}'.format(date.year, date.month, date.day, stem)
def check_file():
    """Validate every post file before import (does NOT touch the database).

    Each post must contain a ``---`` separator splitting the YAML front matter
    from the body, a title without ``---`` in it, and a non-blank body.
    Returns True when every post passes, False (with a printed reason) otherwise.
    """
    for post in get_all_file('posts'):
        with open(post, 'r') as stream:
            _stream = stream.read()
            parts = _stream.split('---', 1)
            # A valid post splits into exactly two parts. The original test
            # (`len(...) < 1`) could never fire: str.split always returns at
            # least one element, so malformed files crashed later instead.
            if len(parts) < 2:
                print('error on --- split between ')
                return False
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input and deprecated since PyYAML 5.1 — consider
            # yaml.safe_load if the front matter never needs python tags.
            _dict = yaml.load(parts[0])
            # `title` may be missing; guard against `'---' in None` (TypeError).
            if '---' in (_dict.get('title') or ''):
                print('--- is not available in title')
                return False
            content = parts[1]
            # Reject bodies that contain only whitespace.
            if not content.strip():
                print('content in {name} is blank'.format(name=stream.name))
                return False
    return True
# generate posts
def generate_posts():
    """Drop the `tears` database and re-import posts, about, and link pages.

    Post YAML front matter (before the first ``---``) becomes the Mongo
    document; the body is stored under ``content`` and a date-based ``url``
    is added. Category and tag membership is accumulated while inserting
    posts and written afterwards.
    """
    # TODO: better check the files and backup the old file then insert
    client.drop_database('tears')
    category_dict = {}
    tag_dict = {}
    posts = get_all_file('posts')
    if not posts:
        print('no post until now')
    else:
        for post in posts:
            with open(post, 'r') as stream:
                _stream = stream.read()
                _dict = yaml.load(_stream.split('---', 1)[0])
                _content = _stream.split('---', 1)[1]
                _dict.update({'content': _content})
                _dict.update({'url': generate_url(_dict.get('date'), stream.name)})
                post_id = post_collection.insert_one(_dict).inserted_id
                # Accumulate post ids per category/tag for batch insert below.
                current_category = _dict.get('categories')
                if current_category:
                    if current_category in category_dict:
                        category_dict[current_category].append(post_id)
                    else:
                        category_dict[current_category] = [post_id]
                for i in _dict.get('tags', []):
                    if i in tag_dict:
                        tag_dict[i].append(post_id)
                    else:
                        tag_dict[i] = [post_id]
        # insert category and tag
        for key, item in category_dict.items():
            category_collection.insert_one({'name': key, 'posts': item})
        for key, item in tag_dict.items():
            tag_collection.insert_one({'name': key, 'posts': item})
    # Single-page content: only the first file in each folder is used.
    about = get_all_file('about')
    if not about:
        print('no about until now')
    else:
        with open(about[0], 'r') as stream:
            _stream = stream.read()
            about_collection.insert_one({'content': _stream})
    link = get_all_file('link')
    if not link:
        print('no link until now')
    else:
        with open(link[0], 'r') as stream:
            _stream = stream.read()
            link_collection.insert_one({'content': _stream})
def main():
    """Command-line entry point: dispatch on ``sys.argv[1]`` (see -h output)."""
    if len(sys.argv) < 2:
        # No sub-command: validate everything, then import.
        valid = check_file()
        if valid:
            generate_posts()
        return
    if sys.argv[1] in ['-h', 'help', '-help']:
        print("""
        Tears command line:
        -i init -init : check if not exist initiate the source dictionary.
        -h help -hlep : show help
        -c create -create [file name]: create a post md file in /posts folder.
        -g generate -generate: generate blog insert markdown file into the mongo.
        """)
        return
    if sys.argv[1] in ['-i', 'init', '-init']:
        if not os.path.exists(basedir):
            os.makedirs(basedir + '/about')
            os.makedirs(basedir + '/link')
            os.makedirs(basedir + '/posts')
        return
    if sys.argv[1] in ['-c', 'create', '-create']:
        # BUG FIX: the original tested `len(sys.argv) < 2`, which is never
        # true inside this branch (argv already has >= 2 entries), so a
        # missing file name crashed with IndexError instead of printing
        # the friendly message. The post name is argv[2], hence `< 3`.
        if len(sys.argv) < 3:
            print('you have not specify the post name')
            return
        else:
            file_name = sys.argv[2].split('.')[0] + '.md'
            if os.path.isfile(basedir + '/posts/' + file_name):
                print('{name} is already exist'.format(name=file_name))
                return
            else:
                # Create the empty post file and close the handle immediately
                # (the original leaked the file object).
                open(basedir + '/posts/' + file_name, 'w+').close()
                print('file {name} generate successfully'.format(name=file_name))
                return
    if sys.argv[1] in ['-g', 'generate', '-generate']:
        valid = check_file()
        if valid:
            generate_posts()
        return
if __name__ == '__main__':
main()
| 32.434211
| 95
| 0.543205
|
acfd7b2c56f59ae2bccde54311fbe6ae0dc49223
| 2,710
|
py
|
Python
|
validator.py
|
tivaliy/cloudwatch_importer
|
46704a4001642a3d00ab1281bf11a4a8dbb900d4
|
[
"Apache-2.0"
] | 2
|
2017-04-26T19:22:14.000Z
|
2018-07-09T09:15:37.000Z
|
validator.py
|
tivaliy/cloudwatch_importer
|
46704a4001642a3d00ab1281bf11a4a8dbb900d4
|
[
"Apache-2.0"
] | null | null | null |
validator.py
|
tivaliy/cloudwatch_importer
|
46704a4001642a3d00ab1281bf11a4a8dbb900d4
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2017 Vitalii Kulanov
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import jsonschema
import six
import utils
logger = logging.getLogger(__name__)
# JSON Schema for the importer configuration file: all four top-level keys
# are mandatory; `metrics` is an array of metric-name strings.
CONFIG_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "url": {
            "type": "string"
        },
        "aws-region": {
            "type": "string"
        },
        "namespace": {
            "type": "string"
        },
        "metrics": {
            "items": {
                "type": "string"
            },
            "type": "array"
        }
    },
    "required": ["url", "aws-region", "namespace", "metrics"]
}
def validate_schema(data, schema, file_path, value_path=None):
    """Validate *data* against *schema*; log a detailed error and re-raise on failure."""
    try:
        jsonschema.validate(data, schema)
    except jsonschema.exceptions.ValidationError as exc:
        message = _make_error_message(exc, file_path, value_path)
        logger.error(message, exc_info=True)
        raise
def validate_file_by_schema(schema, file_path):
    """Read *file_path*, validate its contents against *schema*, and return the data.

    Raises ValueError for an empty file; schema violations propagate from
    validate_schema (which also logs them).
    """
    logger.debug("Start schema validation for '{0}' file with schema: "
                 "{1}".format(file_path, schema))
    content = utils.read_from_file(file_path)
    if content is None:
        raise ValueError("File '{0}' is empty".format(file_path))
    validate_schema(content, schema, file_path)
    return content
def _make_error_message(exc, file_path, value_path):
    """Format a jsonschema ValidationError into one readable line.

    Includes the offending file and, when available, the path of keys/indices
    leading to the invalid value.
    """
    if value_path is None:
        value_path = []
    if exc.absolute_path:
        value_path.extend(exc.absolute_path)
    if exc.context:
        # Compound schema errors (anyOf/oneOf) carry sub-errors in `context`;
        # pick the one with the deepest schema path as the most specific.
        sub_exceptions = sorted(
            exc.context, key=lambda e: len(e.schema_path), reverse=True)
        sub_message = sub_exceptions[0]
        # [2:] drops the leading path components — presumably the sub-schema
        # index prefix; confirm against jsonschema's absolute_path layout.
        value_path.extend(list(sub_message.absolute_path)[2:])
        message = sub_message.message
    else:
        message = exc.message
    error_msg = "File '{0}', {1}".format(file_path, message)
    if value_path:
        value_path = ' -> '.join(map(six.text_type, value_path))
        error_msg = '{0}, {1}'.format(
            error_msg, "value path '{0}'".format(value_path))
    return error_msg
| 29.456522
| 78
| 0.598155
|
acfd7b6c5b7abc86665ac10667275b5ab10f4f21
| 18,892
|
py
|
Python
|
blimp_env/tests/envs/test_planar_navigate_env.py
|
robot-perception-group/AutonomousBlimpDRL
|
a10a88b2e9c9f9a83435cff2e4bc7e16e83cfeee
|
[
"MIT"
] | 8
|
2021-11-21T20:47:37.000Z
|
2022-03-15T09:50:06.000Z
|
blimp_env/tests/envs/test_planar_navigate_env.py
|
robot-perception-group/AutonomousBlimpDRL
|
a10a88b2e9c9f9a83435cff2e4bc7e16e83cfeee
|
[
"MIT"
] | null | null | null |
blimp_env/tests/envs/test_planar_navigate_env.py
|
robot-perception-group/AutonomousBlimpDRL
|
a10a88b2e9c9f9a83435cff2e4bc7e16e83cfeee
|
[
"MIT"
] | null | null | null |
import pytest
from blimp_env.envs import PlanarNavigateEnv
from blimp_env.envs.common.gazebo_connection import GazeboConnection
from stable_baselines3.common.env_checker import check_env
import copy
import numpy as np
# Environment class under test.
ENV = PlanarNavigateEnv
# Shared constructor kwargs; each test deep-copies this so tests stay independent.
env_kwargs = {
    "DBG": True,
    "simulation": {
        # Auto-start disabled — tests appear to assume an externally
        # started Gazebo simulation (see the unpause_sim() calls below).
        "auto_start_simulation": False,
    },
    "observation": {
        "DBG_ROS": False,
        "DBG_OBS": True,
    },
    "action": {
        "DBG_ACT": True,
    },
    "target": {"DBG_ROS": False},
}
# ============== test env ==============#
def test_env_functions():
    """Run SB3's standard Gym-API compliance checks against the environment."""
    check_env(ENV(copy.deepcopy(env_kwargs)), warn=True)
    # The check may leave the simulation paused; resume it for later tests.
    GazeboConnection().unpause_sim()
def test_env_step():
    """Sanity-check step(): return types, observation space, and reward in [-1, 1]."""
    env = ENV(copy.deepcopy(env_kwargs))
    env.reset()
    for _ in range(5):
        action = env.action_space.sample()
        obs, rew, terminal, info = env.step(action)
        assert env.observation_space.contains(obs)
        assert isinstance(rew, float)
        assert isinstance(terminal, bool)
        assert isinstance(info, dict)
        assert rew >= -1 and rew <= 1
    GazeboConnection().unpause_sim()
def test_compute_success_rew():
    """compute_success_rew is 1.0 near the goal (within tolerance), else 0.0."""
    env = ENV(copy.deepcopy(env_kwargs))
    fn = env.compute_success_rew
    achieved_goal = np.ones(3)
    desired_goal = achieved_goal
    result = fn(achieved_goal, desired_goal)
    expect = 1
    np.testing.assert_allclose(result, expect)
    achieved_goal = np.zeros(3)
    desired_goal = achieved_goal
    result = fn(achieved_goal, desired_goal)
    expect = 1
    np.testing.assert_allclose(result, expect)
    # Nearby goals (distance sqrt(3)) still count as success.
    achieved_goal = 1 * np.ones(3)
    desired_goal = 2 * np.ones(3)
    result = fn(achieved_goal, desired_goal)
    expect = 1
    np.testing.assert_allclose(result, expect)
    # Far-apart goals do not.
    achieved_goal = 10 * np.ones(3)
    desired_goal = -10 * np.ones(3)
    result = fn(achieved_goal, desired_goal)
    expect = 0
    np.testing.assert_allclose(result, expect)
    # Reward is a strict 0.0/1.0 float for arbitrary positions.
    for _ in range(100):
        achieved_goal = np.random.uniform(-100, 100, 3)
        desired_goal = np.random.uniform(-100, 100, 3)
        rew = fn(achieved_goal, desired_goal)
        assert isinstance(rew, float)
        assert rew == 0.0 or rew == 1.0
def test_is_terminal(mocker):
    """_is_terminal is True on success or once the step budget (duration) is spent."""
    env = ENV(copy.deepcopy(env_kwargs))
    mock_fn = "blimp_env.envs.planar_navigate_env.PlanarNavigateEnv.compute_success_rew"
    dummy_obs_info = {"position": np.array([0, 0, 0])}
    # No success and time remaining -> not terminal.
    env.config["duration"] = 100
    env.steps = 5
    mocker.patch(mock_fn, return_value=0.0)
    result = env._is_terminal(dummy_obs_info)
    expect = False
    assert result == expect
    # Success -> terminal regardless of remaining time.
    env.config["duration"] = 100
    env.steps = 5
    mocker.patch(mock_fn, return_value=1.0)
    result = env._is_terminal(dummy_obs_info)
    expect = True
    assert result == expect
    # Step budget exceeded -> terminal even without success.
    env.config["duration"] = 100
    env.steps = 200
    mocker.patch(mock_fn, return_value=0.0)
    result = env._is_terminal(dummy_obs_info)
    expect = True
    assert result == expect
    env.config["duration"] = 100
    env.steps = 5
    mocker.patch(mock_fn, return_value=0.0)
    result = env._is_terminal(dummy_obs_info)
    expect = False
    assert result == expect
def test_rew(mocker):
    """_reward combines success reward, weighted tracking errors, and action cost.

    Weights are pinned so each component can be isolated: reward_weights
    [1, 0.95, 0.05] weight (success, tracking, action); the expectations
    below show tracking_reward_weights [0.1..0.4] map to obs indices 0..3.
    """
    env = ENV(copy.deepcopy(env_kwargs))
    env.config["tracking_reward_weights"] = np.array([0.1, 0.2, 0.3, 0.4])
    env.config["reward_weights"] = np.array([1, 0.95, 0.05])
    mock_fn = "blimp_env.envs.planar_navigate_env.PlanarNavigateEnv.compute_success_rew"
    dummy_obs_info = {"position": np.array([0, 0, 0])}
    def dummy_act_rew():
        return 0
    # Success mocked to 1.0 with zero errors -> full reward of 1.
    mocker.patch(mock_fn, return_value=1.0)
    obs = np.zeros(9)
    obs[1] = -1
    result, _ = env._reward(obs, [], dummy_obs_info)
    expect = 1
    np.testing.assert_allclose(result, expect)
    env.action_type.action_rew = dummy_act_rew
    mocker.patch(mock_fn, return_value=0.0)
    obs = np.zeros(9)
    obs[1] = -1
    result, _ = env._reward(obs, [], dummy_obs_info)
    expect = 0
    np.testing.assert_allclose(result, expect)
    # Each tracked obs index contributes -weight * 0.95 in isolation.
    obs = np.zeros(9)
    obs[0] = 1
    obs[1] = -1
    result, _ = env._reward(obs, [], dummy_obs_info)
    expect = -0.1 * 0.95
    np.testing.assert_allclose(result, expect)
    obs = np.zeros(9)
    obs[1] = 1
    result, _ = env._reward(obs, [], dummy_obs_info)
    expect = -0.2 * 0.95
    np.testing.assert_allclose(result, expect)
    obs = np.zeros(9)
    obs[2] = 1
    obs[1] = -1
    result, _ = env._reward(obs, [], dummy_obs_info)
    expect = -0.3 * 0.95
    np.testing.assert_allclose(result, expect)
    obs = np.zeros(9)
    obs[3] = 1
    obs[1] = -1
    result, _ = env._reward(obs, [], dummy_obs_info)
    expect = -0.4 * 0.95
    np.testing.assert_allclose(result, expect)
    # Indices >= 4 are not tracked and contribute nothing.
    obs = np.zeros(9)
    obs[4] = 1
    obs[1] = -1
    result, _ = env._reward(obs, [], dummy_obs_info)
    expect = 0
    np.testing.assert_allclose(result, expect)
    # Rebind the action reward to -1 to exercise the action-cost term (0.05).
    def dummy_act_rew():
        return -1
    env.action_type.action_rew = dummy_act_rew
    mocker.patch(mock_fn, return_value=0.0)
    obs = np.ones(9)
    result, _ = env._reward(obs, [], dummy_obs_info)
    expect = -1.0
    np.testing.assert_allclose(result, expect)
    obs = np.zeros(9)
    obs[1] = -1
    result, _ = env._reward(obs, [], dummy_obs_info)
    expect = -0.05
    np.testing.assert_allclose(result, expect)
# ============== test obs ==============#
def test_compute_psi_diff():
    """compute_psi_diff returns the heading error to the goal, wrapped to [-pi, pi]."""
    env = ENV(copy.deepcopy(env_kwargs))
    fn = env.observation_type.compute_psi_diff
    # Sweep the goal around the observer (psi = 0) through all octants.
    goal_pos, obs_pos, obs_psi = np.array([1, 0, 0]), np.zeros(3), 0
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = 0.0
    np.testing.assert_allclose(result, expect)
    goal_pos, obs_pos, obs_psi = np.ones(3), np.zeros(3), 0
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = 0.25 * np.pi
    np.testing.assert_allclose(result, expect)
    goal_pos, obs_pos, obs_psi = np.array([0, 1, 0]), np.zeros(3), 0
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = 0.5 * np.pi
    np.testing.assert_allclose(result, expect)
    goal_pos, obs_pos, obs_psi = np.array([-1, 1, 0]), np.zeros(3), 0
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = 0.75 * np.pi
    np.testing.assert_allclose(result, expect)
    goal_pos, obs_pos, obs_psi = np.array([-1, 1e-9, 0]), np.zeros(3), 0
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = np.pi
    np.testing.assert_allclose(result, expect)
    goal_pos, obs_pos, obs_psi = np.array([1, -1, 0]), np.zeros(3), 0
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = -0.25 * np.pi
    np.testing.assert_allclose(result, expect)
    goal_pos, obs_pos, obs_psi = np.array([0, -1, 0]), np.zeros(3), 0
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = -0.5 * np.pi
    np.testing.assert_allclose(result, expect)
    goal_pos, obs_pos, obs_psi = np.array([-1, -1, 0]), np.zeros(3), 0
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = -0.75 * np.pi
    np.testing.assert_allclose(result, expect)
    goal_pos, obs_pos, obs_psi = np.array([-1, 0, 0]), np.zeros(3), 0
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = -np.pi
    np.testing.assert_allclose(result, expect)
    # Non-origin observer and nonzero psi are handled too.
    goal_pos, obs_pos, obs_psi = np.zeros(3), np.ones(3), 0
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = -0.75 * np.pi
    np.testing.assert_allclose(result, expect)
    goal_pos, obs_pos, obs_psi = np.ones(3), -np.ones(3), 0.5 * np.pi
    result = fn(goal_pos, obs_pos, obs_psi)
    expect = -0.25 * np.pi
    np.testing.assert_allclose(result, expect)
    # Random inputs: always a float within [-pi, pi].
    for _ in range(100):
        goal_pos = np.random.uniform(-1, 1, 3)
        obs_pos = np.random.uniform(-1, 1, 3)
        obs_psi = np.random.uniform(-1, 1)
        result = fn(goal_pos, obs_pos, obs_psi)
        assert isinstance(result, float)
        assert result >= -np.pi and result <= np.pi
def get_test_scale_obs_dict_io():
    """Paired (raw observation dict, expected scaled dict) fixtures.

    Covers the upper bound (+1), the midpoint (0), the lower bound (-1),
    and an interior point (0.5) of the scaling range for every field.
    """
    keys = ("z_diff", "planar_dist", "psi_diff", "vel_diff", "vel")
    # (raw values, expected scaled values), both in `keys` order.
    cases = [
        ((100, 200 * np.sqrt(2), np.pi, 11.5, 11.5),
         (1, 1, 1, 1, 1)),
        ((0, 100 * np.sqrt(2), 0, 0, 11.5 / 2),
         (0, 0, 0, 0, 0)),
        ((-100, 0, -np.pi, -11.5, 0),
         (-1, -1, -1, -1, -1)),
        ((50, 150 * np.sqrt(2), 0.5 * np.pi, 0.5 * 11.5, 0.75 * 11.5),
         (0.5, 0.5, 0.5, 0.5, 0.5)),
    ]
    in_list = [{k: np.array(v) for k, v in zip(keys, raw)} for raw, _ in cases]
    out_list = [dict(zip(keys, scaled)) for _, scaled in cases]
    return in_list, out_list
@pytest.mark.parametrize(
    "idx", [i for i in range(len(get_test_scale_obs_dict_io()[0]))]
)
def test_scale_obs_dict(idx):
    """scale_obs_dict maps each raw observation onto the expected scaled values."""
    env = ENV(copy.deepcopy(env_kwargs))
    fn = env.observation_type.scale_obs_dict
    input, expect = get_test_scale_obs_dict_io()
    # Second argument (0.0) is passed through unchanged for every fixture.
    result = fn(input[idx], 0.0)
    for k, _ in result.items():
        np.testing.assert_allclose(result[k], expect[idx][k])
def get_test_process_obs_io():
    """Paired (process_obs arguments, expected processed dict) fixtures."""
    in_list, out_list = [], []

    def add_case(obs_dict, goal_dict, expected):
        # Third positional argument to process_obs is always False here.
        in_list.append([obs_dict, goal_dict, False])
        out_list.append(expected)

    add_case(
        {
            "position": np.array([10, 10, 10]),
            "velocity": np.array([3, 2, 1]),
            "angle": np.array([0, 0, np.pi]),
        },
        {"position": np.array([20, 20, 50]), "velocity": 2.5},
        {
            "z_diff": -40,
            "planar_dist": 10 * np.sqrt(2),
            "psi_diff": -0.75 * np.pi,
            "vel_diff": np.sqrt(14) - 2.5,
            "vel": np.sqrt(14),
        },
    )
    add_case(
        {
            "position": np.array([40, 0, 70]),
            "velocity": np.array([3, 3, 3]),
            "angle": np.array([0, 0, 0]),
        },
        {"position": np.array([20, 20, 50]), "velocity": 5},
        {
            "z_diff": 20,
            "planar_dist": 20 * np.sqrt(2),
            "psi_diff": 0.75 * np.pi,
            "vel_diff": np.sqrt(27) - 5,
            "vel": np.sqrt(27),
        },
    )
    return in_list, out_list
@pytest.mark.parametrize("idx", [i for i in range(len(get_test_process_obs_io()[0]))])
def test_process_obs(idx):
    """process_obs turns raw obs/goal dicts into the tracking-error dict."""
    env = ENV(copy.deepcopy(env_kwargs))
    fn = env.observation_type.process_obs
    input, expect = get_test_process_obs_io()
    result = fn(*input[idx])
    for k, _ in result.items():
        np.testing.assert_allclose(result[k], expect[idx][k])
# ============== test act ==============#
def get_test_process_action_io():
    """Paired (process_action arguments, expected processed action) fixtures.

    Each input is [raw action, second argument] — the second array is
    presumably the previous actuator state; confirm against process_action.
    """
    cases = [
        (([0, 0, 0, 0], [0.0, 0.0, 0.0, 0.0]), [0, 0, 0, 0]),
        (([0, 0, 1, 0], [0.0, 0.0, 0.0, 0.0]), [0, 0, 0, 0]),
        (([0, 0, 0, -1], [0.0, 0.0, 0.0, 0.0]), [0, 0, 0, 0]),
        (([1, 1, 1, 1], [0.0, 0.0, 0.0, 0.0]), [0.1, 0.1, 0.0, 0.04]),
        (([-1, -1, -1, -1], [0.0, 0.0, 0.0, 0.0]), [-0.1, -0.1, -0.1, 0.0]),
        (([1, 1, 1, 1], [0.0, 0.0, -0.2, 0.0]), [0.1, 0.1, -0.1, 0.04]),
    ]
    in_list = [[np.array(act), np.array(prev)] for (act, prev), _ in cases]
    out_list = [np.array(expected) for _, expected in cases]
    return in_list, out_list
@pytest.mark.parametrize(
    "idx", [i for i in range(len(get_test_process_action_io()[0]))]
)
def test_process_action(idx):
    """process_action transforms raw actions as encoded in the fixture table."""
    env = ENV(copy.deepcopy(env_kwargs))
    fn = env.action_type.process_action
    # Pin servo flags so the expected outputs below are deterministic.
    env.action_type.forward_servo=True
    env.action_type.disable_servo=False
    input, expect = get_test_process_action_io()
    result = fn(
        *input[idx],
    )
    np.testing.assert_allclose(result, expect[idx])
def test_process_actuator_state():
    """process_actuator_state maps a 4-dim action to 12 PWM channels.

    Channels saturate to [1000, 2000] with 1500 as the idle value; some
    actuators mirror a shared command (column shape is (12, 1)).
    """
    env = ENV(copy.deepcopy(env_kwargs))
    fn = env.action_type.process_actuator_state
    # Zero action -> every channel idles at 1500.
    input = np.array([0, 0, 0, 0])
    expect = 1500 * np.ones(12)
    result = fn(input, 0)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
    # Out-of-range actions saturate at 2000 / 1000.
    input = np.array([1.5, 1.4, 1.3, 1.2])
    expect = np.array(
        [2000, 2000, 2000, 2000, 2000, 2000, 2000, 1500, 2000, 1500, 1500, 1500]
    )
    result = fn(input, 0)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
    input = np.array([-1.2, -1.3, -1.4, -1.5])
    expect = np.array(
        [1000, 1000, 1000, 1000, 1000, 1000, 1000, 1500, 1000, 1500, 1500, 1500]
    )
    result = fn(input, 0)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
    # Each action dimension drives its own subset of channels.
    input = np.array([1, 0, 0, 0])
    expect = np.array(
        [2000, 1500, 1500, 2000, 2000, 1500, 1500, 1500, 1500, 1500, 1500, 1500]
    )
    result = fn(input, 0)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
    input = np.array([0, 1, 0, 0])
    expect = np.array(
        [1500, 2000, 2000, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500]
    )
    result = fn(input, 0)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
    input = np.array([0, 0, 1, 0])
    expect = np.array(
        [1500, 1500, 1500, 1500, 1500, 2000, 1500, 1500, 1500, 1500, 1500, 1500]
    )
    result = fn(input, 0)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
    input = np.array([0, 0, 0, 1])
    expect = np.array(
        [1500, 1500, 1500, 1500, 1500, 1500, 2000, 1500, 2000, 1500, 1500, 1500]
    )
    result = fn(input, 0)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
def test_match_channel():
    """match_channel fans 4 PWM inputs out to the full 12-channel layout."""
    env = ENV(copy.deepcopy(env_kwargs))
    fn = env.action_type.match_channel
    input = np.array([1500, 1500, 1500, 1500])
    expect = 1500 * np.ones(12)
    result = fn(input)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
    # Each input channel maps to its own subset of output channels.
    input = np.array([2000, 1500, 1500, 1500])
    expect = np.array(
        [2000, 1500, 1500, 2000, 2000, 1500, 1500, 1500, 1500, 1500, 1500, 1500]
    )
    result = fn(input)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
    input = np.array([1500, 2000, 1500, 1500])
    expect = np.array(
        [1500, 2000, 2000, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500]
    )
    result = fn(input)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
    input = np.array([1500, 1500, 2000, 1500])
    expect = np.array(
        [1500, 1500, 1500, 1500, 1500, 2000, 1500, 1500, 1500, 1500, 1500, 1500]
    )
    result = fn(input)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
    input = np.array([1500, 1500, 1500, 2000])
    expect = np.array(
        [1500, 1500, 1500, 1500, 1500, 1500, 2000, 1500, 2000, 1500, 1500, 1500]
    )
    result = fn(input)
    np.testing.assert_allclose(result, np.expand_dims(expect, axis=1))
def test_action_rew():
    """action_rew penalizes the current action, weighted per dimension by `scale`."""
    env = ENV(copy.deepcopy(env_kwargs))
    fn = env.action_type.action_rew
    scale = np.array([0.5, 1, 1])
    # No action -> no penalty.
    env.action_type.cur_act = np.array([0, 0, 0, 0])
    result = fn(scale)
    expect = 0
    np.testing.assert_allclose(result, expect)
    env.action_type.cur_act = np.array([1, 0, 0, 0])
    result = fn(scale)
    expect = -0.2
    np.testing.assert_allclose(result, expect)
    env.action_type.cur_act = np.array([0, 0, 0, 1])
    result = fn(scale)
    expect = -0.8
    np.testing.assert_allclose(result, expect)
    # Penalties add up across dimensions.
    env.action_type.cur_act = np.array([1, 0, 0, 1])
    result = fn(scale)
    expect = -1
    np.testing.assert_allclose(result, expect)
def test_get_cur_act():
    """get_cur_act expands the stored 4-dim action into an 8-dim flat vector.

    The expectations show some entries mirror a shared command (e.g. action
    index 0 drives output indices 0, 3 and 4).
    """
    env = ENV(copy.deepcopy(env_kwargs))
    fn = env.action_type.get_cur_act
    env.action_type.cur_act = np.array([0, 0, 0, 0])
    result = fn()
    expect = np.array([0, 0, 0, 0, 0, 0, 0, 0]).reshape(-1)
    np.testing.assert_allclose(result, expect)
    env.action_type.cur_act = np.array([1, 0, 0, 0])
    result = fn()
    expect = np.array([1, 0, 0, 1, 1, 0, 0, 0]).reshape(-1)
    np.testing.assert_allclose(result, expect)
    env.action_type.cur_act = np.array([0, 1, 0, 0])
    result = fn()
    expect = np.array([0, 1, 1, 0, 0, 0, 0, 0]).reshape(-1)
    np.testing.assert_allclose(result, expect)
    env.action_type.cur_act = np.array([0, 0, 1, 0])
    result = fn()
    expect = np.array([0, 0, 0, 0, 0, 1, 0, 0]).reshape(-1)
    np.testing.assert_allclose(result, expect)
    env.action_type.cur_act = np.array([0, 0, 0, 1])
    result = fn()
    expect = np.array([0, 0, 0, 0, 0, 0, 1, 1]).reshape(-1)
    np.testing.assert_allclose(result, expect)
    env.action_type.cur_act = np.array([1, 1, 1, 1])
    result = fn()
    expect = np.array([1, 1, 1, 1, 1, 1, 1, 1]).reshape(-1)
    np.testing.assert_allclose(result, expect)
def test_check_action_publisher_connection():
    """Action publishers should report connected once the simulation is unpaused."""
    env = ENV(copy.deepcopy(env_kwargs))
    GazeboConnection().unpause_sim()
    connected = env.action_type.check_publishers_connection()
    # Truthiness assert is the idiomatic pytest form of `== True` and also
    # accepts equivalent truthy returns (e.g. 1).
    assert connected
# ============== test target ==============#
def test_sample():
    """target sample() returns a goal dict reflecting the latest command data."""
    env = ENV(copy.deepcopy(env_kwargs))
    fn = env.target_type.sample
    goal = fn()
    assert isinstance(goal, dict)
    assert isinstance(goal["position"], np.ndarray)
    assert isinstance(goal["velocity"], float)
    assert isinstance(goal["angle"], np.ndarray)
    # Overwriting the cached command data must be reflected by the next sample.
    env.target_type.pos_cmd_data = np.array([1, 2, 3])
    env.target_type.vel_cmd_data = 5.0
    env.target_type.ang_cmd_data = np.array([5, 6, 7])
    goal = fn()
    np.testing.assert_allclose(goal["position"], np.array([1, 2, 3]))
    np.testing.assert_allclose(goal["velocity"], 5.0)
    np.testing.assert_allclose(goal["angle"], np.array([5, 6, 7]))
def test_timeout_handle():
    """timeout_handle returns zeroed kill/spawn goal service replies."""
    env = ENV(copy.deepcopy(env_kwargs))
    result = env.target_type.timeout_handle()
    assert result == {"kill_goal_reply": 0, "spawn_goal_reply": 0}
| 29.657771
| 88
| 0.596178
|
acfd7be48b490ed2361e44c049acff97981a4ebe
| 32,658
|
py
|
Python
|
CiscoWebexTeams.py
|
marksull/err-backend-cisco-webex-teams
|
cb81edab9c06ffe23ffe78456df81aaf57a80c19
|
[
"MIT"
] | 21
|
2018-08-28T05:49:45.000Z
|
2022-01-26T04:09:21.000Z
|
CiscoWebexTeams.py
|
marksull/err-backend-cisco-webex-teams
|
cb81edab9c06ffe23ffe78456df81aaf57a80c19
|
[
"MIT"
] | 17
|
2018-08-28T06:34:54.000Z
|
2021-07-30T09:41:45.000Z
|
CiscoWebexTeams.py
|
marksull/err-backend-cisco-webex-teams
|
cb81edab9c06ffe23ffe78456df81aaf57a80c19
|
[
"MIT"
] | 6
|
2018-10-08T02:14:08.000Z
|
2021-07-29T14:49:20.000Z
|
import sys
import json
import uuid
import string
import random
import asyncio
import copyreg
import logging
import websockets
from copy import copy
from enum import Enum
from base64 import b64encode
from markdown import markdown
from errbot.core import ErrBot
from errbot.backends.base import (
Message,
Person,
Room,
RoomOccupant,
OFFLINE,
RoomDoesNotExistError,
Stream,
)
from errbot import rendering
import webexteamssdk
from webexteamssdk.models.cards import AdaptiveCard
__version__ = "1.13.0"
log = logging.getLogger("errbot.backends.CiscoWebexTeams")
# Maximum markdown payload size per message; NOTE(review): 7439 looks like an
# empirically determined Webex limit — confirm against current API docs.
CISCO_WEBEX_TEAMS_MESSAGE_SIZE_LIMIT = 7439
# Webex device-management endpoint used to register this client as a device.
DEVICES_URL = "https://wdm-a.wbx2.com/wdm/api/v1/devices"
# Payload sent to DEVICES_URL; the random 5-char suffix keeps device names unique.
DEVICE_DATA = {
    "deviceName": "pywebsocket-client",
    "deviceType": "DESKTOP",
    "localizedModel": "python",
    "model": "python",
    "name": f"python-webex-teams-client-{''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))}",
    "systemName": "python-webex-teams-client",
    "systemVersion": "0.1",
}
# TODO - Need to look at service catalog (somehow?) to determine cluster
# for now, static to us cluster
HYDRA_PREFIX = "ciscospark://us"
class HydraTypes(Enum):
    """Object-type discriminators defined by the Webex "Hydra" API (see link)."""
    # https://github.com/webex/webex-js-sdk/blob/master/packages/node_modules/%40webex/common/src/constants.js#L62
    ATTACHMENT_ACTION = "ATTACHMENT_ACTION"
    CONTENT = "CONTENT"
    MEMBERSHIP = "MEMBERSHIP"
    MESSAGE = "MESSAGE"
    ORGANIZATION = "ORGANIZATION"
    PEOPLE = "PEOPLE"
    ROOM = "ROOM"
    TEAM = "TEAM"
class FailedToCreateWebexDevice(Exception):
    """Raised when a Webex Teams websocket device could not be created."""
    pass
class FailedToFindWebexTeamsPerson(Exception):
    """Raised when a Webex Teams person lookup (by email/name/id) fails."""
    pass
class FailedToFindWebexTeamsRoom(Exception):
    """Raised when a Webex Teams room lookup fails."""
    pass
class CiscoWebexTeamsMessage(Message):
    """
    A Cisco Webex Teams Message.

    Extends Errbot's Message with card/attachment slots and helpers that
    classify the room type carried in ``extras``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Filled in later by the backend when cards or files are attached.
        self.card = None
        self.card_action = None
        self.files = None

    @property
    def is_direct(self) -> bool:
        """True when the message belongs to a 1:1 ("direct") room."""
        return self.extras["roomType"] == "direct"

    @property
    def is_group(self) -> bool:
        """True for any room that is not a direct conversation."""
        return not self.is_direct
class CiscoWebexTeamsPerson(Person):
    """
    A Cisco Webex Teams Person.

    Thin adapter around a ``webexteamssdk.Person`` exposing the attributes
    Errbot's ``Person`` interface expects.
    """

    def __init__(self, backend, attributes=None):
        self._backend = backend
        attributes = attributes or {}
        if isinstance(attributes, webexteamssdk.Person):
            self.teams_person = attributes
        else:
            self.teams_person = webexteamssdk.Person(attributes)

    @property
    def id(self):
        return self.teams_person.id

    @id.setter
    def id(self, val):
        # webexteamssdk models are immutable; write to the backing JSON directly.
        self.teams_person._json_data["id"] = val

    @property
    def emails(self):
        return self.teams_person.emails

    @emails.setter
    def emails(self, val):
        self.teams_person._json_data["emails"] = val

    @property
    def email(self):
        """First known email address, or None when there is none."""
        if type(self.emails) is list:
            if len(self.emails):
                # Not sure why a person can have multiple email addresses
                return self.emails[0]
        return None

    @email.setter
    def email(self, val):
        self.emails = [val]

    @property
    def aclattr(self):
        # Errbot ACLs match people by email address.
        return self.teams_person.email

    @property
    def displayName(self):
        return self.teams_person.displayName

    @property
    def created(self):
        return self.teams_person.created

    @property
    def avatar(self):
        return self.teams_person.avatar

    def find_using_email(self):
        """
        Return the FIRST Cisco Webex Teams person found when searching using an email address
        """
        # BUG FIX (3x below): a bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; only genuine lookup/API failures
        # should be translated into FailedToFindWebexTeamsPerson.
        # NOTE(review): an empty search result returns None without raising,
        # leaving teams_person unchanged — confirm callers expect that.
        try:
            for person in self._backend.webex_teams_api.people.list(email=self.email):
                self.teams_person = person
                return
        except Exception:
            raise FailedToFindWebexTeamsPerson(
                f"Could not find a user using the email address {self.email}"
            )

    def find_using_name(self):
        """
        Return the FIRST Cisco Webex Teams person found when searching using the display name
        """
        try:
            for person in self._backend.webex_teams_api.people.list(
                displayName=self.displayName
            ):
                self.teams_person = person
                return
        except Exception:
            raise FailedToFindWebexTeamsPerson(
                f"Could not find the user using the displayName {self.displayName}"
            )

    def get_using_id(self):
        """
        Return a Cisco Webex Teams person when searching using an ID
        """
        try:
            self.teams_person = self._backend.webex_teams_api.people.get(self.id)
        except Exception:
            raise FailedToFindWebexTeamsPerson(
                f"Could not find the user using the id {self.id}"
            )

    # Required by the Err API

    @property
    def person(self):
        return self.email

    @property
    def client(self):
        return ""

    @property
    def nick(self):
        return ""

    @property
    def fullname(self):
        return self.displayName

    def json(self):
        return self.teams_person.json()

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm persons are never used as dict/set keys.
        return str(self) == str(other)

    def __unicode__(self):
        return self.email

    __str__ = __unicode__
class CiscoWebexTeamsRoomOccupant(CiscoWebexTeamsPerson, RoomOccupant):
    """
    A Cisco Webex Teams Person that Occupies a Cisco Webex Teams Room
    """
    def __init__(self, backend, room=None, person=None):
        # Accept ready-made wrapper objects or raw ids/attribute dicts for both.
        room = room or {}
        person = person or {}
        if isinstance(room, CiscoWebexTeamsRoom):
            self._room = room
        else:
            self._room = CiscoWebexTeamsRoom(backend=backend, room_id=room)
        if isinstance(person, CiscoWebexTeamsPerson):
            self.teams_person = person
        else:
            self.teams_person = CiscoWebexTeamsPerson(
                backend=backend, attributes=person
            )
    @property
    def room(self):
        # RoomOccupant interface: the room this occupant belongs to.
        return self._room
class CiscoWebexTeamsRoom(Room):
    """
    A Cisco Webex Teams Room.

    Wraps a webexteamssdk Room record and can be addressed either by the
    Webex room id or by the room title (mutually exclusive).
    """
    def __init__(self, backend, room_id=None, room_title=None):
        """
        :param backend: the CiscoWebexTeamsBackend instance
        :param room_id: Webex Teams room id (mutually exclusive with room_title)
        :param room_title: Webex Teams room title (mutually exclusive with room_id)
        :raises ValueError: when both or neither of room_id/room_title are given
        """
        self._backend = backend
        self._room_id = room_id
        self._room_title = room_title
        self._room = None
        if room_id is not None and room_title is not None:
            raise ValueError("room_id and room_title are mutually exclusive")
        if not room_id and not room_title:
            raise ValueError("room_id or room_title is needed")
        if room_title is not None:
            self.load_room_from_title()
        else:
            self.load_room_from_id()
    def load_room_from_title(self):
        """
        Load a room object from a title. If no room is found, an empty Room
        object is stored instead.
        """
        rooms = self._backend.webex_teams_api.rooms.list()
        matches = [room for room in rooms if room.title == self._room_title]
        if not matches:
            # No such room (yet): keep an empty record so `exists` is False.
            self._room = webexteamssdk.models.immutable.Room({})
            self._room_id = None
        else:
            # TODO: not sure room title will duplicate
            self._room = matches[0]
            self._room_id = self._room.id
    def load_room_from_id(self):
        """
        Load a room object from a webex room id. If no room is found, an empty
        Room object is stored instead.
        """
        try:
            self._room = self._backend.webex_teams_api.rooms.get(self._room_id)
            self._room_title = self._room.title
        except webexteamssdk.exceptions.ApiError:
            self._room = webexteamssdk.models.immutable.Room({})
    @property
    def id(self):
        """Return the ID of this room"""
        return self._room_id
    @property
    def room(self):
        """Return the webexteamssdk.models.immutable.Room instance"""
        return self._room
    @property
    def created(self):
        # None when the room does not exist (empty Room record).
        return self._room.created
    @property
    def title(self):
        return self._room_title
    @property
    def type(self):
        # "direct" or "group" as reported by the Webex API.
        return self._room.type
    # Errbot API
    def join(self, username=None, password=None):
        """Add the bot to this room; tolerates already-being-a-member."""
        log.debug(f"Joining room {self.title} ({self.id})")
        try:
            self._backend.webex_teams_api.memberships.create(
                self.id, self._backend.bot_identifier.id
            )
            log.debug(
                # FIX: closing parenthesis was missing in the original log text.
                f"{self._backend.bot_identifier.displayName} is NOW a member of {self.title} ({self.id})"
            )
        except webexteamssdk.exceptions.ApiError as error:
            # API now returning a 403 when trying to add user to a direct conversation and they are already in the
            # conversation. For groups if the user is already a member a 409 is returned.
            if error.response.status_code in (403, 409):
                log.debug(
                    f"{self._backend.bot_identifier.displayName} is already a member of {self.title} ({self.id})"
                )
            else:
                log.exception(
                    f"HTTP Exception: Failed to join room {self.title} ({self.id})"
                )
                return
        except Exception:
            log.exception("Failed to join room {} ({})".format(self.title, self.id))
            return
    def leave(self, reason=None):
        log.debug("Leave room yet to be implemented")  # TODO
        pass
    def create(self):
        """
        Create a new room. Membership to the room is provide by default.
        """
        self._room = self._backend.webex_teams_api.rooms.create(self.title)
        self._room_id = self._room.id
        self._backend.webex_teams_api.messages.create(
            roomId=self._room_id, text="Welcome to the room!"
        )
        log.debug(f"Created room: {self.title}")
    def destroy(self):
        """
        Destroy (delete) a room
        :return:
        """
        self._backend.webex_teams_api.rooms.delete(self.id)
        # We want to re-init this room so that is accurately reflects that is no longer exists
        self.load_room_from_title()
        log.debug(f"Deleted room: {self.title}")
    @property
    def exists(self):
        # An empty Room record has created == None; use identity comparison.
        return self._room.created is not None
    @property
    def joined(self):
        # FIX: the original compared room.title == room.title (always True),
        # making `joined` True whenever ANY room was listed. Compare against
        # this room's title instead.
        rooms = self._backend.webex_teams_api.rooms.list()
        return any(room.title == self.title for room in rooms)
    @property
    def topic(self):
        # Webex Teams has no separate topic; mirror the title.
        return self.title
    @topic.setter
    def topic(self, topic):
        log.debug("Topic room yet to be implemented")  # TODO
        pass
    @property
    def occupants(self):
        """
        Return the list of CiscoWebexTeamsRoomOccupant members of this room.

        :raises RoomDoesNotExistError: if the room is absent or inaccessible
        """
        if not self.exists:
            raise RoomDoesNotExistError(
                f"Room {self.title or self.id} does not exist, or the bot does not have access"
            )
        occupants = []
        for person in self._backend.webex_teams_api.memberships.list(roomId=self.id):
            p = CiscoWebexTeamsPerson(backend=self._backend)
            p.id = person.personId
            p.email = person.personEmail
            occupants.append(
                CiscoWebexTeamsRoomOccupant(backend=self._backend, room=self, person=p)
            )
        log.debug(
            "Total occupants for room {} ({}) is {} ".format(
                self.title, self.id, len(occupants)
            )
        )
        return occupants
    def invite(self, *args):
        log.debug("Invite room yet to be implemented")  # TODO
        pass
    def __eq__(self, other):
        return str(self) == str(other)
    def __unicode__(self):
        return self.title
    __str__ = __unicode__
class CiscoWebexTeamsBackend(ErrBot):
"""
This is the CiscoWebexTeams backend for errbot.
"""
    def __init__(self, config):
        """
        Initialise the backend: validate the bot token, cap the message size
        to the Webex limit, create the API client and device, and resolve the
        bot's own identity.

        :param config: errbot config module; must define BOT_IDENTITY["TOKEN"]
        """
        super().__init__(config)
        bot_identity = config.BOT_IDENTITY
        self.md = rendering.md()
        # Do we have the basic mandatory config needed to operate the bot
        self._bot_token = bot_identity.get("TOKEN", None)
        if not self._bot_token:
            log.fatal(
                "You need to define the Cisco Webex Teams Bot TOKEN in the BOT_IDENTITY of config.py."
            )
            sys.exit(1)
        # Adjust message size limit to cater for the non-standard size limit
        if config.MESSAGE_SIZE_LIMIT > CISCO_WEBEX_TEAMS_MESSAGE_SIZE_LIMIT:
            log.info(
                "Capping MESSAGE_SIZE_LIMIT to {} which is the maximum length allowed by CiscoWebexTeams".format(
                    CISCO_WEBEX_TEAMS_MESSAGE_SIZE_LIMIT
                )
            )
            config.MESSAGE_SIZE_LIMIT = CISCO_WEBEX_TEAMS_MESSAGE_SIZE_LIMIT
        log.debug("Setting up SparkAPI")
        self.webex_teams_api = webexteamssdk.WebexTeamsAPI(access_token=self._bot_token)
        log.debug("Setting up device on Webex Teams")
        # The registered device exposes the websocket URL used by serve_once().
        self.device_info = self._get_device_info()
        log.debug("Fetching and building identifier for the bot itself.")
        self.bot_identifier = CiscoWebexTeamsPerson(
            self, self.webex_teams_api.people.me()
        )
        log.debug("Done! I'm connected as {}".format(self.bot_identifier.email))
        self._register_identifiers_pickling()
    @property
    def mode(self):
        # Backend name reported to errbot core.
        return "CiscoWebexTeams"
    def is_from_self(self, message):
        # NOTE(review): this compares the sender id to the *recipient* id,
        # not to the bot's own identity (self.bot_identifier) — confirm this
        # is intended; as written it is only true for self-addressed messages.
        return message.frm.id == message.to.id
    def process_websocket(self, message):
        """
        Process the data from the websocket and determine if we need to ack on it
        :param message: The raw (bytes) message received from the websocket
        :return:
        """
        # The wire payload is UTF-8 JSON.
        message = json.loads(message.decode("utf-8"))
        if message["data"]["eventType"] != "conversation.activity":
            logging.debug(
                "Ignoring message where Event Type is not conversation.activity"
            )
            return
        activity = message["data"]["activity"]
        new_message = None
        # Plain chat messages arrive with verb "post".
        if activity["verb"] == "post":
            new_message = self.webex_teams_api.messages.get(
                self.build_hydra_id(activity["id"])
            )
            if new_message.personEmail in self.bot_identifier.emails:
                logging.debug("Ignoring message from myself")
                return
            logging.info(
                f"Message from {new_message.personEmail}: {new_message.text}\n"
            )
            self.callback_message(self.get_message(new_message))
            return
        # Adaptive-card button presses arrive with verb "cardAction".
        if activity["verb"] == "cardAction":
            new_message = self.webex_teams_api.attachment_actions.get(
                self.build_hydra_id(
                    activity["id"], message_type=HydraTypes.ATTACHMENT_ACTION.value
                )
            )
            callback_card = new_message.inputs.get("_callback_card")
            # When a cardAction is sent it includes the messageId of the message from which
            # the card triggered the action, but includes no parentId that we need to be able
            # to remain within a thread. So we need to take the messageID and lookup the details
            # of the message to be able to determine the parentID.
            reply_message = self.webex_teams_api.messages.get(new_message.messageId)
            new_message.parentId = reply_message.parentId
            self.callback_card(self.get_card_message(new_message), callback_card)
            return
        if not new_message:
            logging.debug(
                f'Ignoring message where the verb is not type "post" or "cardAction". Verb is {activity["verb"]}'
            )
    def callback_card(self, message, callback_card):
        """
        Process a card callback.
        :param message: Message to be processed
        :param callback_card: Name of the plugin method to trigger; defaults
            to "callback_card" when falsy
        """
        if not callback_card:
            callback_card = "callback_card"
        for plugin in self.plugin_manager.get_all_active_plugins():
            plugin_name = plugin.name
            log.debug(f"Triggering {callback_card} on {plugin_name}.",)
            # noinspection PyBroadException
            try:
                # As this is a custom callback specific to this backend, there is no
                # expectation that all plugins with have implemented this method
                if hasattr(plugin, callback_card):
                    getattr(plugin, callback_card)(message)
            except Exception:
                # One misbehaving plugin must not stop the other plugins.
                log.exception(f"{callback_card} on {plugin_name} crashed.")
    def get_card_message(self, message):
        """
        Create an errbot message object with attached card
        :param message: webexteamssdk AttachmentAction to be processed
        :return: CiscoWebexTeamsMessage with card_action attached
        """
        card_person = CiscoWebexTeamsPerson(self)
        card_person.id = message.personId
        card_person.get_using_id()
        try:
            # Replies carry a thread parent; fall back to the message's own
            # id so a new thread roots at this message.
            parent_id = message.parentId
        except AttributeError:
            parent_id = message.id
        card_room = CiscoWebexTeamsRoom(backend=self, room_id=message.roomId)
        card_occupant = CiscoWebexTeamsRoomOccupant(
            self, person=card_person, room=card_room
        )
        card_msg = CiscoWebexTeamsMessage(
            body="",
            frm=card_occupant,
            to=card_room,
            parent=parent_id,
            extras={"roomType": card_room.type},
        )
        # Keep the raw attachment action available for plugins.
        card_msg.card_action = message
        return card_msg
def get_message(self, message):
"""
Create an errbot message object
:param message: The message to be processed
:return:
"""
person = CiscoWebexTeamsPerson(self)
person.id = message.id
try:
person.email = message.personEmail
except AttributeError:
person.get_using_id()
try:
parent_id = message.parentId
except AttributeError:
parent_id = message.id
room = CiscoWebexTeamsRoom(backend=self, room_id=message.roomId)
occupant = CiscoWebexTeamsRoomOccupant(self, person=person, room=room)
msg = CiscoWebexTeamsMessage(
body=message.markdown or message.text,
frm=occupant,
to=room,
parent=parent_id,
extras={"roomType": message.roomType},
)
return msg
    def follow_room(self, room):
        """
        Backend: Follow Room yet to be implemented
        :param room:
        :return:
        """
        log.debug("Backend: Follow Room yet to be implemented")  # TODO
    def rooms(self):
        """
        Backend: Rooms that the bot is a member of
        :return:
        List of "title (type)" strings, one per room
        """
        return [
            f"{room.title} ({room.type})" for room in self.webex_teams_api.rooms.list()
        ]
    def contacts(self):
        """
        Backend: Contacts yet to be implemented
        :return:
        """
        log.debug("Backend: Contacts yet to be implemented")  # TODO
    def build_identifier(self, strrep):
        """
        Build an errbot identifier using the Webex Teams email address of the person
        :param strrep: The email address of the Cisco Webex Teams person
        :return: CiscoWebexTeamsPerson
        """
        person = CiscoWebexTeamsPerson(self)
        person.email = strrep
        person.find_using_email()
        return person
    def query_room(self, room_id_or_name):
        """
        Create a CiscoWebexTeamsRoom object identified by the ID or name of the room
        :param room_id_or_name:
            The Cisco Webex Teams room ID or a room name
        :return:
            :class: CiscoWebexTeamsRoom
        """
        if isinstance(room_id_or_name, webexteamssdk.Room):
            return CiscoWebexTeamsRoom(backend=self, room_id=room_id_or_name.id)
        # query_room can provide us either a room name of an ID, so we need to check
        # for both: try as an id first, then fall back to treating it as a title.
        room = CiscoWebexTeamsRoom(backend=self, room_id=room_id_or_name)
        if not room.exists:
            room = CiscoWebexTeamsRoom(backend=self, room_title=room_id_or_name)
        return room
    def send_card(self, mess):
        """
        Send a card out to Webex Teams.
        :param mess: A CiscoWebexTeamsMessage
        """
        if not hasattr(mess, "card"):
            mess.card = []
        # card backward compatibility for now based on previous contribution
        if hasattr(mess, "layout"):
            mess.card = mess.layout
        # Normalise a single card into a one-element list.
        if not isinstance(mess.card, list) and mess.card is not None:
            mess.card = [mess.card]
        # webebteamssdk currently has a bug that to results in cards not being included as attachments
        # https://github.com/CiscoDevNet/webexteamssdk/pull/141
        # TODO: This section can be removed once the above pull request is merged
        for item, attachment in enumerate(mess.card):
            if isinstance(attachment, AdaptiveCard):
                mess.card[item] = webexteamssdk.utils.make_attachment(attachment)
        # End of workaround
        self.send_message(mess)
    def send_message(self, mess):
        """
        Send a message to Cisco Webex Teams
        :param mess: A CiscoWebexTeamsMessage
        """
        if not hasattr(mess, "card"):
            mess.card = None
        if not hasattr(mess, "files"):
            mess.files = None
        if not isinstance(mess.files, list) and mess.files is not None:
            mess.files = [mess.files]
        # Webex teams does not support a message that contains both a message/text AND a file
        # so lets hide this shortcoming here by creating two separate messages
        if mess.body and mess.files:
            new_msg = copy(mess)
            # First send text message
            new_msg.files = None
            self.send_message(new_msg)
            # And then the message with the file(s)
            new_msg.body = None
            new_msg.files = mess.files
            self.send_message(new_msg)
            return
        # Webex teams does not support more than one file in a single message
        # so lets hide this shortcoming here by creating multiple separate messages
        if mess.files and len(mess.files) > 1:
            new_msg = copy(mess)
            for file in mess.files:
                new_msg.files = [file]
                self.send_message(new_msg)
            return
        md = None
        if mess.body:
            # Need to strip out "markdown extra" as not supported by Webex Teams
            md = markdown(
                self.md.convert(mess.body),
                extensions=[
                    "markdown.extensions.nl2br",
                    "markdown.extensions.fenced_code",
                ],
            )
        # Direct (1:1) messages go to a person id; everything else to a room.
        if type(mess.to) == CiscoWebexTeamsPerson:
            self.webex_teams_api.messages.create(
                toPersonId=mess.to.id,
                text=mess.body,
                markdown=md,
                parentId=mess.parent,
                attachments=mess.card,
                files=mess.files,
            )
            return
        self.webex_teams_api.messages.create(
            roomId=mess.to.room.id,
            text=mess.body,
            markdown=md,
            parentId=mess.parent,
            attachments=mess.card,
            files=mess.files,
        )
def _teams_upload(self, stream):
"""
Performs an upload defined in a stream
:param stream: Stream object
:return: None
"""
try:
stream.accept()
log.exception(
f"Upload of {stream.raw.name} to {stream.identifier} has started."
)
if type(stream.identifier) == CiscoWebexTeamsPerson:
self.webex_teams_api.messages.create(
toPersonId=stream.identifier.id, files=[stream.raw.name]
)
else:
self.webex_teams_api.messages.create(
roomId=stream.identifier.room.id, files=[stream.raw.name]
)
stream.success()
log.exception(
f"Upload of {stream.raw.name} to {stream.identifier} has completed."
)
except Exception:
stream.error()
log.exception(
f"Upload of {stream.raw.name} to {stream.identifier} has failed."
)
finally:
stream.close()
    def send_stream_request(
        self, identifier, fsource, name="file", size=None, stream_type=None
    ):
        """
        Send a file to Cisco Webex Teams
        :param identifier: is the identifier of the person you want to send it to.
        :param fsource: is a file object you want to send.
        :param name: is an optional filename for it.
        :param size: not supported in Webex Teams backend
        :param stream_type: not supported in Webex Teams backend
        :return: the Stream object; the upload itself runs asynchronously
        """
        log.debug(f"Requesting upload of {fsource.name} to {identifier}.")
        stream = Stream(identifier, fsource, name, size, stream_type)
        # Hand the actual upload off to the thread pool so we don't block.
        self.thread_pool.apply_async(self._teams_upload, (stream,))
        return stream
    def build_reply(self, mess, text=None, private=False, threaded=True):
        """
        Build a reply in the format expected by errbot by swapping the to and from source and destination
        :param mess: The original CiscoWebexTeamsMessage object that will be replied to
        :param text: The text that is to be sent in reply to the message
        :param private: Boolean indicating whether the message should be directed as a private message in lieu of
                        sending it back to the room
        :param threaded: Consider threading when creating the reply message
        :return: CiscoWebexTeamsMessage
        """
        response = self.build_message(text)
        # Swap direction: the reply goes back to where the message came from.
        response.frm = mess.to
        response.to = mess.frm
        if threaded:
            response.parent = mess.parent
        return response
    def disconnect_callback(self):
        """
        Disconnection has been requested, lets make sure we clean up
        """
        super().disconnect_callback()
def serve_once(self):
"""
Signal that we are connected to the Webex Teams Service and hang around waiting for disconnection request
"""
self.connect_callback()
try:
while True:
async def _run():
logging.debug(
"Opening websocket connection to %s"
% self.device_info["webSocketUrl"]
)
async with websockets.connect(
self.device_info["webSocketUrl"]
) as ws:
logging.info("WebSocket Opened\n")
msg = {
"id": str(uuid.uuid4()),
"type": "authorization",
"data": {"token": "Bearer " + self._bot_token},
}
await ws.send(json.dumps(msg))
self.reset_reconnection_count()
while True:
message = await ws.recv()
logging.debug(
"WebSocket Received Message(raw): %s\n" % message
)
try:
loop = asyncio.get_event_loop()
loop.run_in_executor(
None, self.process_websocket, message
)
except:
logging.warning(
"An exception occurred while processing message. Ignoring. "
)
asyncio.get_event_loop().run_until_complete(_run())
except KeyboardInterrupt:
log.info("Interrupt received, shutting down..")
return True
finally:
self.disconnect_callback()
    def _get_device_info(self):
        """
        Setup device in Webex Teams to bridge events across websocket
        :return: the device record (dict-like), also stored on self.device_info
        :raises FailedToCreateWebexDevice: when the device cannot be created
        """
        logging.debug("Getting device list from Webex Teams")
        try:
            # Best-effort lookup of an existing device; any API error simply
            # falls through to the creation path below.
            resp = self.webex_teams_api._session.get(DEVICES_URL)
            for device in resp["devices"]:
                if device["name"] == DEVICE_DATA["name"]:
                    self.device_info = device
                    return device
        except webexteamssdk.ApiError:
            pass
        logging.info("Device does not exist in Webex Teams, creating")
        resp = self.webex_teams_api._session.post(DEVICES_URL, json=DEVICE_DATA)
        if resp is None:
            raise FailedToCreateWebexDevice(
                "Could not create Webex Teams device using {}".format(DEVICES_URL)
            )
        self.device_info = resp
        return resp
    def change_presence(self, status=OFFLINE, message=""):
        """
        Backend: Change presence yet to be implemented
        :param status:
        :param message:
        :return:
        """
        log.debug("Backend: Change presence yet to be implemented")  # TODO
        pass
    def prefix_groupchat_reply(self, message, identifier):
        """
        Backend: Prefix group chat reply yet to be implemented
        :param message:
        :param identifier:
        :return:
        """
        log.debug("Backend: Prefix group chat reply yet to be implemented")  # TODO
        pass
@staticmethod
def build_hydra_id(uuid, message_type=HydraTypes.MESSAGE.value):
"""
Convert a UUID into Hydra ID that includes geo routing
:param uuid: The UUID to be encoded
:param message_type: The type of message to be encoded
:return (str): The encoded uuid
"""
return (
b64encode(f"{HYDRA_PREFIX}/{message_type}/{uuid}".encode("ascii")).decode(
"ascii"
)
if "-" in uuid
else uuid
)
    def remember(self, id, key, value):
        """
        Save the value of a key to a dictionary specific to a Webex Teams room or person
        This is available in backend to provide easy access to variables that can be shared between plugins
        :param id: Webex Teams ID of room or person
        :param key: The dictionary key
        :param value: The value to be assigned to the key
        """
        # Read-modify-write against the backend's persistent store (self[id]).
        values = self.recall(id)
        values[key] = value
        self[id] = values
    def forget(self, id, key):
        """
        Delete a key from a dictionary specific to a Webex Teams room or person
        :param id: Webex Teams ID of room or person
        :param key: The dictionary key
        :return: The popped value or None if the key was not found
        """
        values = self.recall(id)
        value = values.pop(key, None)
        self[id] = values
        return value
    def recall(self, id):
        """
        Access a dictionary for a room or person using the Webex Teams ID as the key
        :param id: Webex Teams ID of room or person
        :return: A dictionary. If no dictionary was found an empty dictionary will be returned.
        """
        values = self.get(id)
        return values if values else {}
    def recall_key(self, id, key):
        """
        Access the value of a specific key from a Webex Teams room or person dictionary
        :param id: Webex Teams ID of room or person
        :param key: The dictionary key
        :return: Either the value of the key or None if the key is not found
        """
        return self.recall(id).get(key)
    @staticmethod
    def _unpickle_identifier(identifier_str):
        # Rebuild an identifier from its string form via the bound
        # build_identifier captured in _register_identifiers_pickling().
        return CiscoWebexTeamsBackend.__build_identifier(identifier_str)
    @staticmethod
    def _pickle_identifier(identifier):
        # Pickle identifiers by their string form only; reconstruction goes
        # through _unpickle_identifier above.
        return CiscoWebexTeamsBackend._unpickle_identifier, (str(identifier),)
    def _register_identifiers_pickling(self):
        """
        Register identifiers pickling.
        """
        # Stash this backend's build_identifier on the class so the static
        # unpickler can reach it without an instance.
        CiscoWebexTeamsBackend.__build_identifier = self.build_identifier
        for cls in (
            CiscoWebexTeamsPerson,
            CiscoWebexTeamsRoomOccupant,
            CiscoWebexTeamsRoom,
        ):
            copyreg.pickle(
                cls,
                CiscoWebexTeamsBackend._pickle_identifier,
                CiscoWebexTeamsBackend._unpickle_identifier,
            )
| 31.132507
| 124
| 0.588677
|
acfd7bf3dde64b47a8131de3c6a975dfac129a67
| 725
|
py
|
Python
|
portfolio/urls.py
|
Toluhunter/portfolio
|
79ab350f1f218b64ab8ebacaa1dc54f42c4b73a1
|
[
"Apache-2.0"
] | 3
|
2021-11-21T11:48:49.000Z
|
2022-03-24T20:32:24.000Z
|
portfolio/urls.py
|
Toluhunter/portfolio
|
79ab350f1f218b64ab8ebacaa1dc54f42c4b73a1
|
[
"Apache-2.0"
] | null | null | null |
portfolio/urls.py
|
Toluhunter/portfolio
|
79ab350f1f218b64ab8ebacaa1dc54f42c4b73a1
|
[
"Apache-2.0"
] | null | null | null |
"""portfolio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
urlpatterns = [
    # Delegate all routing to the "page" application's URLconf.
    path('', include("page.urls"))
]
| 34.52381
| 77
| 0.702069
|
acfd7d1ebd55b81f2d1b740a67a7da9ecc2f5dfe
| 1,975
|
py
|
Python
|
Assets/Python/BUG/BugHelp.py
|
macaurther/DOCUSA
|
40586727c351d1b1130c05c2d4648cca3a8bacf5
|
[
"MIT"
] | 93
|
2015-11-20T04:13:36.000Z
|
2022-03-24T00:03:08.000Z
|
Assets/Python/BUG/BugHelp.py
|
macaurther/DOCUSA
|
40586727c351d1b1130c05c2d4648cca3a8bacf5
|
[
"MIT"
] | 206
|
2015-11-09T00:27:15.000Z
|
2021-12-04T19:05:18.000Z
|
Assets/Python/BUG/BugHelp.py
|
dguenms/Dawn-of-Civilization
|
1c4f510af97a869637cddb4c0859759158cea5ce
|
[
"MIT"
] | 117
|
2015-11-08T02:43:46.000Z
|
2022-02-12T06:29:00.000Z
|
## BugHelp
##
## Opens BUG's help file, "BUG Mod Help.chm", or the online version, for the user's language.
##
## TODO:
## Move to configuration XML
## Support multiple help files and shortcuts
##
## Copyright (c) 2008 The BUG Mod.
##
## Author: EmperorFool
from CvPythonExtensions import *
import Popup as PyPopup
import BugPath
import BugUtil
def launch(argsList=None):
    """
    Opens the mod's help file or web page externally if it can be found or displays an error alert.
    On Windows this opens the compiled HTML help file (CHM).
    On Mac this opens a browser window to the online help file.

    Returns True when the help was opened, False otherwise.
    """
    if BugPath.isMac():
        # Online help only exists for a subset of languages; unsupported
        # languages fall back to English.
        sLang = ["ENG", "ENG", "DEU", "ITA", "ENG"]
        url = "http://civ4bug.sourceforge.net/BUGModHelp/%s/index.htm" % sLang[CyGame().getCurrentLanguage()]
        try:
            import webbrowser
            showLaunchMessage()
            webbrowser.open(url, new=1, autoraise=1)
            return True
        except Exception:
            # FIX: narrowed from a bare except so system-exiting exceptions
            # are not silently turned into an error popup.
            showErrorAlert(BugUtil.getPlainText("TXT_KEY_BUG_HELP_CANNOT_OPEN_BROWSER_TITLE"),
                           BugUtil.getText("TXT_KEY_BUG_HELP_CANNOT_OPEN_BROWSER_BODY", (url,)))
    else:
        sLang = ["ENG", "FRA", "DEU", "ITA", "ESP"]
        name = "BUG Mod Help-%s.chm" % (sLang[CyGame().getCurrentLanguage()])
        # Renamed from "file" to avoid shadowing the builtin.
        helpFile = BugPath.findInfoFile(name)
        if helpFile:
            import os
            message = BugUtil.getPlainText("TXT_KEY_BUG_HELP_OPENING")
            CyInterface().addImmediateMessage(message, "")
            os.startfile(helpFile)
            return True
        else:
            showErrorAlert(BugUtil.getPlainText("TXT_KEY_BUG_HELP_MISSING_TITLE"),
                           BugUtil.getText("TXT_KEY_BUG_HELP_MISSING_BODY", (name,)))
    return False
def showLaunchMessage():
    """
    Shows an "opening..." alert message in the event log.
    """
    BugUtil.alert(BugUtil.getPlainText("TXT_KEY_BUG_HELP_OPENING"))
def showErrorAlert(title, body):
    """
    Opens a popup window showing the given error message.
    :param title: header text of the popup
    :param body: body text of the popup
    """
    popup = PyPopup.PyPopup()
    popup.setHeaderString(title)
    popup.setBodyString(body)
    popup.launch()
| 30.384615
| 104
| 0.69519
|
acfd7d2a93b46d4647b96f6e0eed8ae997bd12b2
| 408
|
py
|
Python
|
umusicfy/umusicfy/celery.py
|
CarlosMart626/umusicfy
|
97e2166fe26d1fbe36df6bea435044ef3d367edf
|
[
"Apache-2.0"
] | null | null | null |
umusicfy/umusicfy/celery.py
|
CarlosMart626/umusicfy
|
97e2166fe26d1fbe36df6bea435044ef3d367edf
|
[
"Apache-2.0"
] | 8
|
2020-06-05T18:08:05.000Z
|
2022-01-13T00:44:30.000Z
|
umusicfy/umusicfy/celery.py
|
CarlosMart626/umusicfy
|
97e2166fe26d1fbe36df6bea435044ef3d367edf
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# Default to the production settings module when none is set in the env.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "umusicfy.settings.prd")
app = Celery('umusicfy')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
# Discover tasks.py modules in every installed Django application.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| 29.142857
| 72
| 0.813725
|
acfd7d470378175a182d58732102c9ecf8819452
| 15,825
|
py
|
Python
|
pgmpy/factors/FactorSet.py
|
predictive-analytics-lab/pgmpy
|
6c2a31641adc72793acd130d007190fdb1632271
|
[
"MIT"
] | null | null | null |
pgmpy/factors/FactorSet.py
|
predictive-analytics-lab/pgmpy
|
6c2a31641adc72793acd130d007190fdb1632271
|
[
"MIT"
] | null | null | null |
pgmpy/factors/FactorSet.py
|
predictive-analytics-lab/pgmpy
|
6c2a31641adc72793acd130d007190fdb1632271
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from pgmpy.extern.six.moves import filter, reduce
from pgmpy.factors.base import BaseFactor
from pgmpy.extern import six
class FactorSet(object):
r"""
Base class of *DiscreteFactor Sets*.
A factor set provides a compact representation of higher dimensional factor
:math:`\phi_1\cdot\phi_2\cdots\phi_n`
For example the factor set corresponding to factor :math:`\phi_1\cdot\phi_2` would be the union of the factors
:math:`\phi_1` and :math:`\phi_2` i.e. factor set :math:`\vec\phi = \phi_1 \cup \phi_2`.
"""
def __init__(self, *factors_list):
"""
Initialize the factor set class.
Parameters
----------
factors_list: Factor1, Factor2, ....
All the factors whose product is represented by the factor set
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set = FactorSet(phi1, phi2)
>>> factor_set
<pgmpy.factors.FactorSet.FactorSet at 0x7f8e32af6d50>
>>> print(factor_set)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b4c2d0>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b4c710>])
"""
if not all(isinstance(phi, BaseFactor) for phi in factors_list):
raise TypeError("Input parameters must be child classes of BaseFactor")
self.factors = set([factor.copy() for factor in factors_list])
def add_factors(self, *factors):
"""
Adds factors to the factor set.
Parameters
----------
factors: Factor1, Factor2, ...., Factorn
factors to be added into the factor set
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set1.add_factors(phi3, phi4)
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b4ca10>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e4c393690>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b4c750>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b4cb50>])
"""
self.factors.update(factors)
def remove_factors(self, *factors):
"""
Removes factors from the factor set.
Parameters
----------
factors: Factor1, Factor2, ...., Factorn
factors to be removed from the factor set
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> factor_set1.add_factors(phi3)
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b250>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b5b150>])
>>> factor_set1.remove_factors(phi1, phi2)
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b4cb10>])
"""
for factor in factors:
self.factors.remove(factor)
def get_factors(self):
"""
Returns all the factors present in factor set.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> factor_set1.add_factors(phi3)
>>> factor_set1.get_factors()
{<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f827c0a23c8>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f827c0a2358>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f825243f9e8>}
"""
return self.factors
def product(self, factorset, inplace=True):
r"""
Return the factor sets product with the given factor sets
Suppose :math:`\vec\phi_1` and :math:`\vec\phi_2` are two factor sets then their product is a another factors
set :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`.
Parameters
----------
factorsets: FactorSet1, FactorSet2, ..., FactorSetn
FactorSets to be multiplied
inplace: A boolean (Default value True)
If inplace = True , then it will modify the FactorSet object, if False, it will
return a new FactorSet object.
Returns
--------
If inpalce = False, will return a new FactorSet object, which is product of two factors
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b690>])
>>> factor_set2.product(factor_set1)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b4c910>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b4cc50>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b690>])
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factor_set2.product(factor_set1, inplace=False)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b060>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b790>])
"""
factor_set = self if inplace else self.copy()
factor_set1 = factorset.copy()
factor_set.add_factors(*factor_set1.factors)
if not inplace:
return factor_set
def divide(self, factorset, inplace=True):
r"""
Returns a new factor set instance after division by the factor set
Division of two factor sets :math:`\frac{\vec\phi_1}{\vec\phi_2}` basically translates to union of all the
factors present in :math:`\vec\phi_2` and :math:`\frac{1}{\phi_i}` of all the factors present in
:math:`\vec\phi_2`.
Parameters
----------
factorset: FactorSet
The divisor
inplace: A boolean (Default value True)
If inplace = True ,then it will modify the FactorSet object, if False then will
return a new FactorSet object.
Returns
--------
If inplace = False, will return a new FactorSet Object which is division of
given factors.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factor_set2.divide(factor_set1)
>>> print(factor_set3)
set([<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b5ba10>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b650>,
<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b8d0>])
"""
factor_set = self if inplace else self.copy()
factor_set1 = factorset.copy()
factor_set.add_factors(*[phi.identity_factor() / phi for phi in factor_set1.factors])
if not inplace:
return factor_set
def marginalize(self, variables, inplace=True):
"""
Marginalizes the factors present in the factor sets with respect to the given variables.
Parameters
----------
variables: list, array-like
List of the variables to be marginalized.
inplace: boolean (Default value True)
If inplace=True it will modify the factor set itself, would create a new factor set
Returns
-------
If inplace = False, will return a new marginalized FactorSet object.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> factor_set1.marginalize('x1')
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x2:3, x3:2) at 0x7f8e32b4cc10>,
<DiscreteFactor representing phi(x3:2, x4:2) at 0x7f8e32b4cf90>])
"""
if isinstance(variables, six.string_types):
raise TypeError("Expected list or array-like type got type str")
factor_set = self if inplace else self.copy()
factors_to_be_marginalized = set(
filter(lambda x: set(x.scope()).intersection(variables), factor_set.factors)
)
for factor in factors_to_be_marginalized:
variables_to_be_marginalized = list(set(factor.scope()).intersection(variables))
if inplace:
factor.marginalize(variables_to_be_marginalized, inplace=True)
else:
factor_set.remove_factors(factor)
factor_set.add_factors(
factor.marginalize(variables_to_be_marginalized, inplace=False)
)
if not inplace:
return factor_set
    def __mul__(self, other):
        """Operator form of :meth:`product`.

        NOTE(review): ``product`` defaults to ``inplace=True``, which mutates
        ``self`` and returns ``None`` -- so ``a * b`` evaluates to ``None``.
        Confirm whether ``inplace=False`` was intended before changing.
        """
        return self.product(other)
    def __truediv__(self, other):
        """Operator form of :meth:`divide`.

        NOTE(review): ``divide`` defaults to ``inplace=True``, which mutates
        ``self`` and returns ``None`` -- so ``a / b`` evaluates to ``None``.
        Confirm whether ``inplace=False`` was intended before changing.
        """
        return self.divide(other)
    def __str__(self):
        """Return the string representation of the underlying set of factors."""
        return self.factors.__str__()
    def copy(self):
        """
        Create a copy of factor set.

        Returns
        -------
        FactorSet: a new FactorSet over copies of this set's factors
            (the FactorSet constructor copies the factors it is given).

        Examples
        --------
        >>> from pgmpy.factors import FactorSet
        >>> from pgmpy.factors.discrete import DiscreteFactor
        >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
        >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
        >>> factor_set = FactorSet(phi1, phi2)
        >>> factor_set
        <pgmpy.factors.FactorSet.FactorSet at 0x7fa68f390320>
        >>> factor_set_copy = factor_set.copy()
        >>> factor_set_copy
        <pgmpy.factors.FactorSet.FactorSet at 0x7f91a0031160>
        """
        # No need to have copies of factors as argument because __init__ method creates copies.
        return FactorSet(*self.factors)
def factorset_product(*factorsets_list):
    r"""
    Base method used for product of factor sets.

    The product of factor sets :math:`\vec\phi_1` and :math:`\vec\phi_2` is
    simply their union: :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`.

    Parameters
    ----------
    factorsets_list: FactorSet1, FactorSet2, ..., FactorSetn
        All the factor sets to be multiplied.

    Returns
    -------
    FactorSet: the product of all factor sets in ``factorsets_list``.

    Examples
    --------
    >>> from pgmpy.factors import FactorSet
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> from pgmpy.factors import factorset_product
    >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
    >>> factor_set1 = FactorSet(phi1, phi2)
    >>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
    >>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
    >>> factor_set2 = FactorSet(phi3, phi4)
    >>> factor_set3 = factorset_product(factor_set1, factor_set2)
    """
    # Reject anything that is not a FactorSet before doing any work.
    for factorset in factorsets_list:
        if not isinstance(factorset, FactorSet):
            raise TypeError("Input parameters must be FactorSet instances")
    # Fold the sets together pairwise; inplace=False keeps the inputs intact.
    return reduce(lambda acc, nxt: acc.product(nxt, inplace=False), factorsets_list)
def factorset_divide(factorset1, factorset2):
    r"""
    Base method for dividing two factor sets.

    The quotient :math:`\frac{\vec\phi_1}{\vec\phi_2}` is the union of the
    factors in :math:`\vec\phi_1` with the reciprocal :math:`\frac{1}{\phi_i}`
    of every factor :math:`\phi_i` in :math:`\vec\phi_2`.

    Parameters
    ----------
    factorset1: FactorSet
        The dividend.

    factorset2: FactorSet
        The divisor.

    Returns
    -------
    FactorSet: the division of factorset1 by factorset2.

    Examples
    --------
    >>> from pgmpy.factors import FactorSet
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> from pgmpy.factors import factorset_divide
    >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
    >>> factor_set1 = FactorSet(phi1, phi2)
    >>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
    >>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
    >>> factor_set2 = FactorSet(phi3, phi4)
    >>> factor_set3 = factorset_divide(factor_set2, factor_set1)
    """
    if not (isinstance(factorset1, FactorSet) and isinstance(factorset2, FactorSet)):
        raise TypeError("factorset1 and factorset2 must be FactorSet instances")
    # inplace=False so neither operand is mutated.
    return factorset1.divide(factorset2, inplace=False)
| 41.426702
| 118
| 0.600885
|
acfd7dc850f9f04cb23a8cfe6f92c52a5d2d32db
| 1,499
|
py
|
Python
|
azext_iot/sdk/digitaltwins_arm/models/error_definition.py
|
harunpehlivan/azure-iot-cli-extension
|
ddf4f0beb510551999fc97dc5af7f4c7ed2d202f
|
[
"MIT"
] | null | null | null |
azext_iot/sdk/digitaltwins_arm/models/error_definition.py
|
harunpehlivan/azure-iot-cli-extension
|
ddf4f0beb510551999fc97dc5af7f4c7ed2d202f
|
[
"MIT"
] | null | null | null |
azext_iot/sdk/digitaltwins_arm/models/error_definition.py
|
harunpehlivan/azure-iot-cli-extension
|
ddf4f0beb510551999fc97dc5af7f4c7ed2d202f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ErrorDefinition(Model):
    """Error definition.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar code: Service specific error code which serves as the substatus for
     the HTTP error code.
    :vartype code: str
    :ivar message: Description of the error.
    :vartype message: str
    :ivar details: Internal error details.
    :vartype details: list[~digitaltwins-arm.models.ErrorDefinition]
    """

    # All fields are server-populated, hence marked read-only for msrest.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'details': {'readonly': True},
    }

    # Maps Python attribute names to the wire (JSON) keys and msrest types.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDefinition]'},
    }

    def __init__(self, **kwargs):
        super(ErrorDefinition, self).__init__(**kwargs)
        # Read-only fields start as None; the server fills them in on responses.
        self.code = None
        self.message = None
        self.details = None
| 31.893617
| 77
| 0.582388
|
acfd7e60597bf6a28d4997d2ec549c24fa065053
| 22,757
|
py
|
Python
|
theano/tests/test_printing.py
|
mdda/Theano
|
6ca7b2b65000e371f009b617d41bc5a90f022d38
|
[
"BSD-3-Clause"
] | null | null | null |
theano/tests/test_printing.py
|
mdda/Theano
|
6ca7b2b65000e371f009b617d41bc5a90f022d38
|
[
"BSD-3-Clause"
] | null | null | null |
theano/tests/test_printing.py
|
mdda/Theano
|
6ca7b2b65000e371f009b617d41bc5a90f022d38
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tests of printing functionality
"""
from __future__ import print_function
import logging
from nose.plugins.skip import SkipTest
import numpy
from six.moves import StringIO
import theano
import theano.tensor as tensor
from theano.printing import min_informative_str, debugprint
def test_pydotprint_cond_highlight():
    """Partial smoke test: pydotprint must log a warning when
    cond_highlight is requested but the graph has no IfElse node."""
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    f = theano.function([x], x * 2)
    f([1, 2, 3, 4])

    # Capture theano's log output while pydotprint runs.
    log_sink = StringIO()
    capture_handler = logging.StreamHandler(log_sink)
    capture_handler.setLevel(logging.DEBUG)
    default_handler = theano.logging_default_handler

    theano.theano_logger.removeHandler(default_handler)
    theano.theano_logger.addHandler(capture_handler)
    try:
        theano.printing.pydotprint(f, cond_highlight=True,
                                   print_output_file=False)
    finally:
        # Always restore the original logging configuration.
        theano.theano_logger.addHandler(default_handler)
        theano.theano_logger.removeHandler(capture_handler)

    assert log_sink.getvalue() == ('pydotprint: cond_highlight is set but there'
                                   ' is no IfElse node in the graph\n')
def test_pydotprint_return_image():
    """pydotprint(return_image=True) must return the rendered image as str."""
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    graph_out = tensor.dvector() * 2
    image = theano.printing.pydotprint(graph_out, return_image=True)
    assert isinstance(image, str)
def test_pydotprint_variables():
    """Partial smoke test: pydotprint and pydotprint_variables must run
    without raising on a trivial graph."""
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()

    # Redirect theano's logging while the functions run.
    log_sink = StringIO()
    capture_handler = logging.StreamHandler(log_sink)
    capture_handler.setLevel(logging.DEBUG)
    default_handler = theano.logging_default_handler

    theano.theano_logger.removeHandler(default_handler)
    theano.theano_logger.addHandler(capture_handler)
    try:
        theano.printing.pydotprint(x * 2)
        theano.printing.pydotprint_variables(x * 2)
    finally:
        # Always restore the original logging configuration.
        theano.theano_logger.addHandler(default_handler)
        theano.theano_logger.removeHandler(capture_handler)
def test_pydotprint_long_name():
    """Partial test: nodes with different long names but identical
    shortened labels must not be merged in the dot graph."""
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    mode = theano.compile.mode.get_default_mode().excluding("fusion")
    f = theano.function([x], [x * 2, x + x], mode=mode)
    f([1, 2, 3, 4])

    # Print both a compiled function and raw output variables.
    theano.printing.pydotprint(f, max_label_size=5,
                               print_output_file=False)
    theano.printing.pydotprint([x * 2, x + x], max_label_size=5,
                               print_output_file=False)
def test_pydotprint_profile():
    """Smoke test: pydotprint must not crash on a ProfileMode function."""
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    matrix_in = tensor.matrix()
    profiled_fn = theano.function([matrix_in], matrix_in + 1, mode='ProfileMode')
    theano.printing.pydotprint(profiled_fn, print_output_file=False)
def test_min_informative_str():
    """Compare min_informative_str output against a reference dump to make
    sure the function works as intended."""
    A = tensor.matrix(name='A')
    B = tensor.matrix(name='B')
    C = A + B
    C.name = 'C'
    D = tensor.matrix(name='D')
    E = tensor.matrix(name='E')
    G = C + (D + E)

    # Tabs become single spaces so indentation depth is one space per level.
    mis = min_informative_str(G).replace("\t", " ")

    reference = """A. Elemwise{add,no_inplace}
 B. C
 C. Elemwise{add,no_inplace}
  D. D
  E. E"""

    if mis != reference:
        # Dump both sides with delimiters so whitespace differences show up.
        print('--' + mis + '--')
        print('--' + reference + '--')
    assert mis == reference
def test_debugprint():
    """Check ``debugprint`` output for the supported ``ids`` modes.

    Previously the capture / compare / dump-on-mismatch logic was copied
    four times; it is factored into local helpers here.
    """
    A = tensor.matrix(name='A')
    B = tensor.matrix(name='B')
    C = A + B
    C.name = 'C'
    D = tensor.matrix(name='D')
    E = tensor.matrix(name='E')
    F = D + E
    G = C + F

    def _capture(**kwargs):
        # Render G's debugprint into a string with the given options.
        sio = StringIO()
        debugprint(G, file=sio, **kwargs)
        return sio.getvalue()

    def _assert_matches(actual, reference):
        # Dump both sides with visible delimiters before failing, so
        # whitespace differences are easy to diagnose.
        if actual != reference:
            print('--' + actual + '--')
            print('--' + reference + '--')
        assert actual == reference

    # just test that it work
    debugprint(G)

    # test ids=int
    # The additional white space are needed!
    _assert_matches(_capture(ids='int'), '\n'.join([
        "Elemwise{add,no_inplace} [@0] '' ",
        " |Elemwise{add,no_inplace} [@1] 'C' ",
        " | |A [@2]",
        " | |B [@3]",
        " |Elemwise{add,no_inplace} [@4] '' ",
        " |D [@5]",
        " |E [@6]",
    ]) + '\n')

    # test ids=CHAR
    # The additional white space are needed!
    _assert_matches(_capture(ids='CHAR'), "\n".join([
        "Elemwise{add,no_inplace} [@A] '' ",
        " |Elemwise{add,no_inplace} [@B] 'C' ",
        " | |A [@C]",
        " | |B [@D]",
        " |Elemwise{add,no_inplace} [@E] '' ",
        " |D [@F]",
        " |E [@G]",
    ]) + '\n')

    # test ids=CHAR, stop_on_name=True
    # The additional white space are needed!
    _assert_matches(_capture(ids='CHAR', stop_on_name=True), '\n'.join([
        "Elemwise{add,no_inplace} [@A] '' ",
        " |Elemwise{add,no_inplace} [@B] 'C' ",
        " |Elemwise{add,no_inplace} [@C] '' ",
        " |D [@D]",
        " |E [@E]",
    ]) + '\n')

    # test ids=
    # The additional white space are needed!
    _assert_matches(_capture(ids=''), '\n'.join([
        "Elemwise{add,no_inplace} '' ",
        " |Elemwise{add,no_inplace} 'C' ",
        " | |A ",
        " | |B ",
        " |Elemwise{add,no_inplace} '' ",
        " |D ",
        " |E ",
    ]) + '\n')
def test_scan_debugprint1():
    """debugprint of a simple scan graph (cumulative product A**k) must
    match the reference dump line by line."""
    k = tensor.iscalar("k")
    A = tensor.dvector("A")

    # Symbolic description of the result
    result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,
                                  outputs_info=tensor.ones_like(A),
                                  non_sequences=A,
                                  n_steps=k)
    final_result = result[-1]

    output_str = theano.printing.debugprint(final_result, file='str')
    lines = output_str.split('\n')

    expected_output = """Subtensor{int64} [@A] ''
 |Subtensor{int64::} [@B] ''
 | |for{cpu,scan_fn} [@C] ''
 | | |k [@D]
 | | |IncSubtensor{Set;:int64:} [@E] ''
 | | | |Alloc [@F] ''
 | | | | |TensorConstant{0.0} [@G]
 | | | | |Elemwise{add,no_inplace} [@H] ''
 | | | | | |k [@D]
 | | | | | |Subtensor{int64} [@I] ''
 | | | | | |Shape [@J] ''
 | | | | | | |Rebroadcast{0} [@K] ''
 | | | | | | |DimShuffle{x,0} [@L] ''
 | | | | | | |Elemwise{second,no_inplace} [@M] ''
 | | | | | | |A [@N]
 | | | | | | |DimShuffle{x} [@O] ''
 | | | | | | |TensorConstant{1.0} [@P]
 | | | | | |Constant{0} [@Q]
 | | | | |Subtensor{int64} [@R] ''
 | | | | |Shape [@S] ''
 | | | | | |Rebroadcast{0} [@K] ''
 | | | | |Constant{1} [@T]
 | | | |Rebroadcast{0} [@K] ''
 | | | |ScalarFromTensor [@U] ''
 | | | |Subtensor{int64} [@I] ''
 | | |A [@N]
 | |Constant{1} [@V]
 |Constant{-1} [@W]
Inner graphs of the scan ops:
for{cpu,scan_fn} [@C] ''
 >Elemwise{mul,no_inplace} [@X] ''
 > |<TensorType(float64, vector)> [@Y] -> [@E]
 > |A_copy [@Z] -> [@N]"""

    expected_lines = expected_output.split("\n")
    # zip() stops at the shorter sequence, so a truncated debugprint dump
    # could previously pass unnoticed; require full coverage first.
    assert len(lines) >= len(expected_lines)
    for truth, out in zip(expected_lines, lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint2():
    """debugprint of a scan-based polynomial evaluation must match the
    reference dump line by line."""
    coefficients = theano.tensor.vector("coefficients")
    x = tensor.scalar("x")
    max_coefficients_supported = 10000

    # Generate the components of the polynomial
    components, updates = theano.scan(
        fn=lambda coefficient, power, free_variable:
            coefficient * (free_variable ** power),
        outputs_info=None,
        sequences=[coefficients,
                   theano.tensor.arange(max_coefficients_supported)],
        non_sequences=x)
    # Sum them up
    polynomial = components.sum()

    output_str = theano.printing.debugprint(polynomial, file='str')
    lines = output_str.split('\n')

    expected_output = """Sum{acc_dtype=float64} [@A] ''
 |for{cpu,scan_fn} [@B] ''
 |Elemwise{minimum,no_inplace} [@C] ''
 | |Subtensor{int64} [@D] ''
 | | |Shape [@E] ''
 | | | |Subtensor{int64::} [@F] 'coefficients[0:]'
 | | | |coefficients [@G]
 | | | |Constant{0} [@H]
 | | |Constant{0} [@I]
 | |Subtensor{int64} [@J] ''
 | |Shape [@K] ''
 | | |Subtensor{int64::} [@L] ''
 | | |ARange{dtype='int64'} [@M] ''
 | | | |TensorConstant{0} [@N]
 | | | |TensorConstant{10000} [@O]
 | | | |TensorConstant{1} [@P]
 | | |Constant{0} [@Q]
 | |Constant{0} [@R]
 |Subtensor{:int64:} [@S] ''
 | |Subtensor{int64::} [@F] 'coefficients[0:]'
 | |ScalarFromTensor [@T] ''
 | |Elemwise{minimum,no_inplace} [@C] ''
 |Subtensor{:int64:} [@U] ''
 | |Subtensor{int64::} [@L] ''
 | |ScalarFromTensor [@V] ''
 | |Elemwise{minimum,no_inplace} [@C] ''
 |Elemwise{minimum,no_inplace} [@C] ''
 |x [@W]
Inner graphs of the scan ops:
for{cpu,scan_fn} [@B] ''
 >Elemwise{mul,no_inplace} [@X] ''
 > |coefficients[t] [@Y] -> [@S]
 > |Elemwise{pow,no_inplace} [@Z] ''
 > |x_copy [@BA] -> [@W]
 > |<TensorType(int64, scalar)> [@BB] -> [@U]"""

    expected_lines = expected_output.split("\n")
    # zip() stops at the shorter sequence, so a truncated debugprint dump
    # could previously pass unnoticed; require full coverage first.
    assert len(lines) >= len(expected_lines)
    for truth, out in zip(expected_lines, lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint3():
    """debugprint of a nested scan (polynomial whose variable is itself a
    scan-computed power A**k) must match the reference dump."""
    coefficients = theano.tensor.dvector("coefficients")
    max_coefficients_supported = 10

    k = tensor.iscalar("k")
    A = tensor.dvector("A")

    # compute A**k
    def compute_A_k(A, k):
        # Symbolic description of the result
        result, updates = theano.scan(fn=lambda prior_result,
                                      A: prior_result * A,
                                      outputs_info=tensor.ones_like(A),
                                      non_sequences=A,
                                      n_steps=k)
        A_k = result[-1]
        return A_k

    # Generate the components of the polynomial
    components, updates = theano.scan(fn=lambda coefficient,
                                      power, some_A, some_k:
                                      coefficient *
                                      (compute_A_k(some_A, some_k) ** power),
                                      outputs_info=None,
                                      sequences=[
                                          coefficients,
                                          theano.tensor.arange(
                                              max_coefficients_supported)],
                                      non_sequences=[A, k])
    # Sum them up
    polynomial = components.sum()
    final_result = polynomial

    output_str = theano.printing.debugprint(final_result, file='str')
    lines = output_str.split('\n')

    expected_output = """Sum{acc_dtype=float64} [@A] ''
 |for{cpu,scan_fn} [@B] ''
 |Elemwise{minimum,no_inplace} [@C] ''
 | |Subtensor{int64} [@D] ''
 | | |Shape [@E] ''
 | | | |Subtensor{int64::} [@F] 'coefficients[0:]'
 | | | |coefficients [@G]
 | | | |Constant{0} [@H]
 | | |Constant{0} [@I]
 | |Subtensor{int64} [@J] ''
 | |Shape [@K] ''
 | | |Subtensor{int64::} [@L] ''
 | | |ARange{dtype='int64'} [@M] ''
 | | | |TensorConstant{0} [@N]
 | | | |TensorConstant{10} [@O]
 | | | |TensorConstant{1} [@P]
 | | |Constant{0} [@Q]
 | |Constant{0} [@R]
 |Subtensor{:int64:} [@S] ''
 | |Subtensor{int64::} [@F] 'coefficients[0:]'
 | |ScalarFromTensor [@T] ''
 | |Elemwise{minimum,no_inplace} [@C] ''
 |Subtensor{:int64:} [@U] ''
 | |Subtensor{int64::} [@L] ''
 | |ScalarFromTensor [@V] ''
 | |Elemwise{minimum,no_inplace} [@C] ''
 |Elemwise{minimum,no_inplace} [@C] ''
 |A [@W]
 |k [@X]
Inner graphs of the scan ops:
for{cpu,scan_fn} [@B] ''
 >Elemwise{mul,no_inplace} [@Y] ''
 > |DimShuffle{x} [@Z] ''
 > | |coefficients[t] [@BA] -> [@S]
 > |Elemwise{pow,no_inplace} [@BB] ''
 > |Subtensor{int64} [@BC] ''
 > | |Subtensor{int64::} [@BD] ''
 > | | |for{cpu,scan_fn} [@BE] ''
 > | | | |k_copy [@BF] -> [@X]
 > | | | |IncSubtensor{Set;:int64:} [@BG] ''
 > | | | | |Alloc [@BH] ''
 > | | | | | |TensorConstant{0.0} [@BI]
 > | | | | | |Elemwise{add,no_inplace} [@BJ] ''
 > | | | | | | |k_copy [@BF] -> [@X]
 > | | | | | | |Subtensor{int64} [@BK] ''
 > | | | | | | |Shape [@BL] ''
 > | | | | | | | |Rebroadcast{0} [@BM] ''
 > | | | | | | | |DimShuffle{x,0} [@BN] ''
 > | | | | | | | |Elemwise{second,no_inplace} [@BO] ''
 > | | | | | | | |A_copy [@BP] -> [@W]
 > | | | | | | | |DimShuffle{x} [@BQ] ''
 > | | | | | | | |TensorConstant{1.0} [@BR]
 > | | | | | | |Constant{0} [@BS]
 > | | | | | |Subtensor{int64} [@BT] ''
 > | | | | | |Shape [@BU] ''
 > | | | | | | |Rebroadcast{0} [@BM] ''
 > | | | | | |Constant{1} [@BV]
 > | | | | |Rebroadcast{0} [@BM] ''
 > | | | | |ScalarFromTensor [@BW] ''
 > | | | | |Subtensor{int64} [@BK] ''
 > | | | |A_copy [@BP] -> [@W]
 > | | |Constant{1} [@BX]
 > | |Constant{-1} [@BY]
 > |DimShuffle{x} [@BZ] ''
 > |<TensorType(int64, scalar)> [@CA] -> [@U]
for{cpu,scan_fn} [@BE] ''
 >Elemwise{mul,no_inplace} [@CB] ''
 > |<TensorType(float64, vector)> [@CC] -> [@BG]
 > |A_copy [@CD] -> [@BP]"""

    expected_lines = expected_output.split("\n")
    # zip() stops at the shorter sequence, so a truncated debugprint dump
    # could previously pass unnoticed; require full coverage first.
    assert len(lines) >= len(expected_lines)
    for truth, out in zip(expected_lines, lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint4():
    """debugprint of a scan with two double-tapped recurrent outputs must
    match the reference dump line by line."""
    def fn(a_m2, a_m1, b_m2, b_m1):
        return a_m1 + a_m2, b_m1 + b_m2

    a0 = theano.shared(numpy.arange(2))
    b0 = theano.shared(numpy.arange(2))

    (a, b), _ = theano.scan(
        fn, outputs_info=[{'initial': a0, 'taps': [-2, -1]},
                          {'initial': b0, 'taps': [-2, -1]}],
        n_steps=5)

    final_result = a + b

    output_str = theano.printing.debugprint(final_result, file='str')
    lines = output_str.split('\n')

    expected_output = """Elemwise{add,no_inplace} [@A] ''
 |Subtensor{int64::} [@B] ''
 | |for{cpu,scan_fn}.0 [@C] ''
 | | |TensorConstant{5} [@D]
 | | |IncSubtensor{Set;:int64:} [@E] ''
 | | | |Alloc [@F] ''
 | | | | |TensorConstant{0} [@G]
 | | | | |Elemwise{add,no_inplace} [@H] ''
 | | | | |TensorConstant{5} [@D]
 | | | | |Subtensor{int64} [@I] ''
 | | | | |Shape [@J] ''
 | | | | | |Subtensor{:int64:} [@K] ''
 | | | | | |<TensorType(int64, vector)> [@L]
 | | | | | |Constant{2} [@M]
 | | | | |Constant{0} [@N]
 | | | |Subtensor{:int64:} [@K] ''
 | | | |ScalarFromTensor [@O] ''
 | | | |Subtensor{int64} [@I] ''
 | | |IncSubtensor{Set;:int64:} [@P] ''
 | | |Alloc [@Q] ''
 | | | |TensorConstant{0} [@G]
 | | | |Elemwise{add,no_inplace} [@R] ''
 | | | |TensorConstant{5} [@D]
 | | | |Subtensor{int64} [@S] ''
 | | | |Shape [@T] ''
 | | | | |Subtensor{:int64:} [@U] ''
 | | | | |<TensorType(int64, vector)> [@V]
 | | | | |Constant{2} [@W]
 | | | |Constant{0} [@X]
 | | |Subtensor{:int64:} [@U] ''
 | | |ScalarFromTensor [@Y] ''
 | | |Subtensor{int64} [@S] ''
 | |Constant{2} [@Z]
 |Subtensor{int64::} [@BA] ''
 |for{cpu,scan_fn}.1 [@C] ''
 |Constant{2} [@BB]
Inner graphs of the scan ops:
for{cpu,scan_fn}.0 [@C] ''
 >Elemwise{add,no_inplace} [@BC] ''
 > |<TensorType(int64, scalar)> [@BD] -> [@E]
 > |<TensorType(int64, scalar)> [@BE] -> [@E]
 >Elemwise{add,no_inplace} [@BF] ''
 > |<TensorType(int64, scalar)> [@BG] -> [@P]
 > |<TensorType(int64, scalar)> [@BH] -> [@P]
for{cpu,scan_fn}.1 [@C] ''
 >Elemwise{add,no_inplace} [@BC] ''
 >Elemwise{add,no_inplace} [@BF] ''"""

    expected_lines = expected_output.split("\n")
    # zip() stops at the shorter sequence, so a truncated debugprint dump
    # could previously pass unnoticed; require full coverage first.
    assert len(lines) >= len(expected_lines)
    for truth, out in zip(expected_lines, lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint5():
    """debugprint of the gradient of a scan graph must match the
    reference dump line by line."""
    k = tensor.iscalar("k")
    A = tensor.dvector("A")

    # Symbolic description of the result
    result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,
                                  outputs_info=tensor.ones_like(A),
                                  non_sequences=A,
                                  n_steps=k)

    final_result = tensor.grad(result[-1].sum(), A)

    output_str = theano.printing.debugprint(final_result, file='str')
    lines = output_str.split('\n')

    expected_output = """Subtensor{int64} [@A] ''
 |for{cpu,grad_of_scan_fn}.1 [@B] ''
 | |Elemwise{sub,no_inplace} [@C] ''
 | | |Subtensor{int64} [@D] ''
 | | | |Shape [@E] ''
 | | | | |for{cpu,scan_fn} [@F] ''
 | | | | |k [@G]
 | | | | |IncSubtensor{Set;:int64:} [@H] ''
 | | | | | |Alloc [@I] ''
 | | | | | | |TensorConstant{0.0} [@J]
 | | | | | | |Elemwise{add,no_inplace} [@K] ''
 | | | | | | | |k [@G]
 | | | | | | | |Subtensor{int64} [@L] ''
 | | | | | | | |Shape [@M] ''
 | | | | | | | | |Rebroadcast{0} [@N] ''
 | | | | | | | | |DimShuffle{x,0} [@O] ''
 | | | | | | | | |Elemwise{second,no_inplace} [@P] ''
 | | | | | | | | |A [@Q]
 | | | | | | | | |DimShuffle{x} [@R] ''
 | | | | | | | | |TensorConstant{1.0} [@S]
 | | | | | | | |Constant{0} [@T]
 | | | | | | |Subtensor{int64} [@U] ''
 | | | | | | |Shape [@V] ''
 | | | | | | | |Rebroadcast{0} [@N] ''
 | | | | | | |Constant{1} [@W]
 | | | | | |Rebroadcast{0} [@N] ''
 | | | | | |ScalarFromTensor [@X] ''
 | | | | | |Subtensor{int64} [@L] ''
 | | | | |A [@Q]
 | | | |Constant{0} [@Y]
 | | |TensorConstant{1} [@Z]
 | |Subtensor{:int64:} [@BA] ''
 | | |Subtensor{::int64} [@BB] ''
 | | | |Subtensor{:int64:} [@BC] ''
 | | | | |for{cpu,scan_fn} [@F] ''
 | | | | |Constant{-1} [@BD]
 | | | |Constant{-1} [@BE]
 | | |ScalarFromTensor [@BF] ''
 | | |Elemwise{sub,no_inplace} [@C] ''
 | |Subtensor{:int64:} [@BG] ''
 | | |Subtensor{:int64:} [@BH] ''
 | | | |Subtensor{::int64} [@BI] ''
 | | | | |for{cpu,scan_fn} [@F] ''
 | | | | |Constant{-1} [@BJ]
 | | | |Constant{-1} [@BK]
 | | |ScalarFromTensor [@BL] ''
 | | |Elemwise{sub,no_inplace} [@C] ''
 | |Subtensor{::int64} [@BM] ''
 | | |IncSubtensor{Inc;int64::} [@BN] ''
 | | | |Elemwise{second,no_inplace} [@BO] ''
 | | | | |for{cpu,scan_fn} [@BP] ''
 | | | | | |k [@G]
 | | | | | |IncSubtensor{Set;:int64:} [@H] ''
 | | | | | |A [@Q]
 | | | | |DimShuffle{x,x} [@BQ] ''
 | | | | |TensorConstant{0.0} [@J]
 | | | |IncSubtensor{Inc;int64} [@BR] ''
 | | | | |Elemwise{second,no_inplace} [@BS] ''
 | | | | | |Subtensor{int64::} [@BT] ''
 | | | | | | |for{cpu,scan_fn} [@BP] ''
 | | | | | | |Constant{1} [@BU]
 | | | | | |DimShuffle{x,x} [@BV] ''
 | | | | | |TensorConstant{0.0} [@J]
 | | | | |Elemwise{second} [@BW] ''
 | | | | | |Subtensor{int64} [@BX] ''
 | | | | | | |Subtensor{int64::} [@BT] ''
 | | | | | | |Constant{-1} [@BY]
 | | | | | |DimShuffle{x} [@BZ] ''
 | | | | | |Elemwise{second,no_inplace} [@CA] ''
 | | | | | |Sum{acc_dtype=float64} [@CB] ''
 | | | | | | |Subtensor{int64} [@BX] ''
 | | | | | |TensorConstant{1.0} [@S]
 | | | | |Constant{-1} [@BY]
 | | | |Constant{1} [@BU]
 | | |Constant{-1} [@CC]
 | |Alloc [@CD] ''
 | | |TensorConstant{0.0} [@J]
 | | |Elemwise{add,no_inplace} [@CE] ''
 | | | |Elemwise{sub,no_inplace} [@C] ''
 | | | |TensorConstant{1} [@Z]
 | | |Subtensor{int64} [@CF] ''
 | | |Shape [@CG] ''
 | | | |A [@Q]
 | | |Constant{0} [@CH]
 | |A [@Q]
 |Constant{-1} [@CI]
Inner graphs of the scan ops:
for{cpu,grad_of_scan_fn}.1 [@B] ''
 >Elemwise{add,no_inplace} [@CJ] ''
 > |Elemwise{mul} [@CK] ''
 > | |<TensorType(float64, vector)> [@CL] -> [@BM]
 > | |A_copy [@CM] -> [@Q]
 > |<TensorType(float64, vector)> [@CN] -> [@BM]
 >Elemwise{add,no_inplace} [@CO] ''
 > |Elemwise{mul} [@CP] ''
 > | |<TensorType(float64, vector)> [@CL] -> [@BM]
 > | |<TensorType(float64, vector)> [@CQ] -> [@BA]
 > |<TensorType(float64, vector)> [@CR] -> [@CD]
for{cpu,scan_fn} [@F] ''
 >Elemwise{mul,no_inplace} [@CS] ''
 > |<TensorType(float64, vector)> [@CT] -> [@H]
 > |A_copy [@CU] -> [@Q]
for{cpu,scan_fn} [@F] ''
 >Elemwise{mul,no_inplace} [@CS] ''
for{cpu,scan_fn} [@F] ''
 >Elemwise{mul,no_inplace} [@CS] ''
for{cpu,scan_fn} [@BP] ''
 >Elemwise{mul,no_inplace} [@CS] ''
for{cpu,scan_fn} [@BP] ''
 >Elemwise{mul,no_inplace} [@CS] ''"""

    expected_lines = expected_output.split("\n")
    # zip() stops at the shorter sequence, so a truncated debugprint dump
    # could previously pass unnoticed; require full coverage first.
    assert len(lines) >= len(expected_lines)
    for truth, out in zip(expected_lines, lines):
        assert truth.strip() == out.strip()
| 32.279433
| 78
| 0.474096
|
acfd7eaffbf560558276c50376ac09fcdc9a0764
| 52,038
|
py
|
Python
|
acme/acme/client_test.py
|
realkcn/certbot
|
2b1cb9037b1c908a2fdf73d5faca4db6b79d9214
|
[
"Apache-2.0"
] | null | null | null |
acme/acme/client_test.py
|
realkcn/certbot
|
2b1cb9037b1c908a2fdf73d5faca4db6b79d9214
|
[
"Apache-2.0"
] | null | null | null |
acme/acme/client_test.py
|
realkcn/certbot
|
2b1cb9037b1c908a2fdf73d5faca4db6b79d9214
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for acme.client."""
import copy
import datetime
import json
import unittest
from six.moves import http_client # pylint: disable=import-error
import josepy as jose
import mock
import OpenSSL
import requests
from acme import challenges
from acme import errors
from acme import jws as acme_jws
from acme import messages
from acme import messages_test
from acme import test_util
from acme.magic_typing import Dict # pylint: disable=unused-import, no-name-in-module
# Test vectors (certificates, CSR, RSA keys) loaded from the package's
# testdata directory via test_util.
CERT_DER = test_util.load_vector('cert.der')
CERT_SAN_PEM = test_util.load_vector('cert-san.pem')
CSR_SAN_PEM = test_util.load_vector('csr-san.pem')
KEY = jose.JWKRSA.load(test_util.load_vector('rsa512_key.pem'))
KEY2 = jose.JWKRSA.load(test_util.load_vector('rsa256_key.pem'))
# ACME v1 directory: resources are keyed by message class.
DIRECTORY_V1 = messages.Directory({
    messages.NewRegistration:
        'https://www.letsencrypt-demo.org/acme/new-reg',
    messages.Revocation:
        'https://www.letsencrypt-demo.org/acme/revoke-cert',
    messages.NewAuthorization:
        'https://www.letsencrypt-demo.org/acme/new-authz',
    messages.CertificateRequest:
        'https://www.letsencrypt-demo.org/acme/new-cert',
})
# ACME v2 directory: resources are keyed by plain string names.
DIRECTORY_V2 = messages.Directory({
    'newAccount': 'https://www.letsencrypt-demo.org/acme/new-account',
    'newNonce': 'https://www.letsencrypt-demo.org/acme/new-nonce',
    'newOrder': 'https://www.letsencrypt-demo.org/acme/new-order',
    'revokeCert': 'https://www.letsencrypt-demo.org/acme/revoke-cert',
})
class ClientTestBase(unittest.TestCase):
    """Base for tests in acme.client."""

    def setUp(self):
        # Fake network layer: every GET/POST yields the same mock response.
        self.response = mock.MagicMock(
            ok=True, status_code=http_client.OK, headers={}, links={})
        self.net = mock.MagicMock()
        self.net.post.return_value = self.response
        self.net.get.return_value = self.response

        self.identifier = messages.Identifier(
            typ=messages.IDENTIFIER_FQDN, value='example.com')

        # Registration fixtures.
        self.contact = ('mailto:cert-admin@example.com', 'tel:+12025551212')
        reg_body = messages.Registration(
            contact=self.contact, key=KEY.public_key())
        reg_fields = dict(reg_body)  # type: Dict
        self.new_reg = messages.NewRegistration(**reg_fields)  # pylint: disable=star-args
        self.regr = messages.RegistrationResource(
            body=reg_body, uri='https://www.letsencrypt-demo.org/acme/reg/1')

        # Authorization fixtures: one valid DNS challenge.
        authzr_uri = 'https://www.letsencrypt-demo.org/acme/authz/1'
        challb = messages.ChallengeBody(
            uri=(authzr_uri + '/1'), status=messages.STATUS_VALID,
            chall=challenges.DNS(token=jose.b64decode(
                'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA')))
        self.challr = messages.ChallengeResource(
            body=challb, authzr_uri=authzr_uri)
        self.authz = messages.Authorization(
            identifier=messages.Identifier(
                typ=messages.IDENTIFIER_FQDN, value='example.com'),
            challenges=(challb,), combinations=None)
        self.authzr = messages.AuthorizationResource(
            body=self.authz, uri=authzr_uri)

        # Reason code for revocation
        self.rsn = 1
class BackwardsCompatibleClientV2Test(ClientTestBase):
"""Tests for acme.client.BackwardsCompatibleClientV2."""
def setUp(self):
super(BackwardsCompatibleClientV2Test, self).setUp()
# contains a loaded cert
self.certr = messages.CertificateResource(
body=messages_test.CERT)
loaded = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, CERT_SAN_PEM)
wrapped = jose.ComparableX509(loaded)
self.chain = [wrapped, wrapped]
self.cert_pem = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, messages_test.CERT.wrapped).decode()
single_chain = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, loaded).decode()
self.chain_pem = single_chain + single_chain
self.fullchain_pem = self.cert_pem + self.chain_pem
self.orderr = messages.OrderResource(
csr_pem=CSR_SAN_PEM)
def _init(self):
uri = 'http://www.letsencrypt-demo.org/directory'
from acme.client import BackwardsCompatibleClientV2
return BackwardsCompatibleClientV2(net=self.net,
key=KEY, server=uri)
def test_init_downloads_directory(self):
uri = 'http://www.letsencrypt-demo.org/directory'
from acme.client import BackwardsCompatibleClientV2
BackwardsCompatibleClientV2(net=self.net,
key=KEY, server=uri)
self.net.get.assert_called_once_with(uri)
def test_init_acme_version(self):
self.response.json.return_value = DIRECTORY_V1.to_json()
client = self._init()
self.assertEqual(client.acme_version, 1)
self.response.json.return_value = DIRECTORY_V2.to_json()
client = self._init()
self.assertEqual(client.acme_version, 2)
def test_query_registration_client_v2(self):
self.response.json.return_value = DIRECTORY_V2.to_json()
client = self._init()
self.response.json.return_value = self.regr.body.to_json()
self.assertEqual(self.regr, client.query_registration(self.regr))
def test_forwarding(self):
self.response.json.return_value = DIRECTORY_V1.to_json()
client = self._init()
self.assertEqual(client.directory, client.client.directory)
self.assertEqual(client.key, KEY)
self.assertEqual(client.deactivate_registration, client.client.deactivate_registration)
self.assertRaises(AttributeError, client.__getattr__, 'nonexistent')
self.assertRaises(AttributeError, client.__getattr__, 'new_account_and_tos')
self.assertRaises(AttributeError, client.__getattr__, 'new_account')
    def test_new_account_and_tos(self):
        """Exercise new_account_and_tos across ACME versions and TOS states.

        Covers: v2 directory without TOS, v2 with TOS and an accepting
        callback, v2 with TOS and a rejecting callback, v1 with a
        terms_of_service URL, and v1 without one.
        """
        # v2 no tos
        self.response.json.return_value = DIRECTORY_V2.to_json()
        with mock.patch('acme.client.ClientV2') as mock_client:
            client = self._init()
            client.new_account_and_tos(self.new_reg)
            mock_client().new_account.assert_called_with(self.new_reg)
        # v2 tos good: the callback approves, so the registration is sent
        # with terms_of_service_agreed=True.
        with mock.patch('acme.client.ClientV2') as mock_client:
            mock_client().directory.meta.__contains__.return_value = True
            client = self._init()
            client.new_account_and_tos(self.new_reg, lambda x: True)
            mock_client().new_account.assert_called_with(
                self.new_reg.update(terms_of_service_agreed=True))
        # v2 tos bad: the callback raises, so no account must be created.
        with mock.patch('acme.client.ClientV2') as mock_client:
            mock_client().directory.meta.__contains__.return_value = True
            client = self._init()
            def _tos_cb(tos):
                raise errors.Error
            self.assertRaises(errors.Error, client.new_account_and_tos,
                              self.new_reg, _tos_cb)
            mock_client().new_account.assert_not_called()
        # v1 yes tos: register, then agree_to_tos must be called.
        self.response.json.return_value = DIRECTORY_V1.to_json()
        with mock.patch('acme.client.Client') as mock_client:
            regr = mock.MagicMock(terms_of_service="TOS")
            mock_client().register.return_value = regr
            client = self._init()
            client.new_account_and_tos(self.new_reg)
            mock_client().register.assert_called_once_with(self.new_reg)
            mock_client().agree_to_tos.assert_called_once_with(regr)
        # v1 no tos: register only, no TOS agreement.
        with mock.patch('acme.client.Client') as mock_client:
            regr = mock.MagicMock(terms_of_service=None)
            mock_client().register.return_value = regr
            client = self._init()
            client.new_account_and_tos(self.new_reg)
            mock_client().register.assert_called_once_with(self.new_reg)
            mock_client().agree_to_tos.assert_not_called()
@mock.patch('OpenSSL.crypto.load_certificate_request')
@mock.patch('acme.crypto_util._pyopenssl_cert_or_req_all_names')
def test_new_order_v1(self, mock__pyopenssl_cert_or_req_all_names,
                      unused_mock_load_certificate_request):
    """Against a v1 directory, new_order requests one authz per CSR name."""
    self.response.json.return_value = DIRECTORY_V1.to_json()
    mock__pyopenssl_cert_or_req_all_names.return_value = [
        'example.com', 'www.example.com']
    csr_pem = mock.MagicMock()
    with mock.patch('acme.client.Client') as mock_client:
        mock_client().request_domain_challenges.return_value = (
            mock.sentinel.auth)
        orderr = self._init().new_order(csr_pem)
        self.assertEqual(orderr.authorizations,
                         [mock.sentinel.auth, mock.sentinel.auth])
def test_new_order_v2(self):
    """Against a v2 directory, new_order is forwarded to ClientV2."""
    self.response.json.return_value = DIRECTORY_V2.to_json()
    csr_pem = mock.MagicMock()
    with mock.patch('acme.client.ClientV2') as mock_client:
        self._init().new_order(csr_pem)
        mock_client().new_order.assert_called_once_with(csr_pem)
@mock.patch('acme.client.Client')
def test_finalize_order_v1_success(self, mock_client):
    """v1 finalize_order issues the cert and assembles the full chain."""
    self.response.json.return_value = DIRECTORY_V1.to_json()
    mock_client().request_issuance.return_value = self.certr
    mock_client().fetch_chain.return_value = self.chain
    far_future = datetime.datetime(9999, 9, 9)
    finalized = self._init().finalize_order(self.orderr, far_future)
    self.assertEqual(finalized.fullchain_pem, self.fullchain_pem)
    mock_client().fetch_chain.assert_called_once_with(self.certr)
@mock.patch('acme.client.Client')
def test_finalize_order_v1_fetch_chain_error(self, mock_client):
    """A transient fetch_chain error is retried and then succeeds.

    The first fetch_chain call raises; the second returns the chain, so
    finalize_order must retry exactly once (two calls in total).
    """
    self.response.json.return_value = DIRECTORY_V1.to_json()
    mock_client().request_issuance.return_value = self.certr
    # Only side_effect drives fetch_chain; the return_value previously
    # set here was shadowed by side_effect and has been removed.
    mock_client().fetch_chain.side_effect = [errors.Error, self.chain]
    deadline = datetime.datetime(9999, 9, 9)
    client = self._init()
    result = client.finalize_order(self.orderr, deadline)
    self.assertEqual(result.fullchain_pem, self.fullchain_pem)
    self.assertEqual(mock_client().fetch_chain.call_count, 2)
@mock.patch('acme.client.Client')
def test_finalize_order_v1_timeout(self, mock_client):
    """finalize_order raises TimeoutError once the deadline has passed."""
    self.response.json.return_value = DIRECTORY_V1.to_json()
    mock_client().request_issuance.return_value = self.certr
    # A deadline in the past forces an immediate timeout.  (The original
    # line contained a duplicated assignment: `deadline = deadline = ...`.)
    deadline = datetime.datetime.now() - datetime.timedelta(seconds=60)
    client = self._init()
    self.assertRaises(errors.TimeoutError, client.finalize_order,
                      self.orderr, deadline)
def test_finalize_order_v2(self):
    """Against a v2 directory, finalize_order is forwarded to ClientV2."""
    self.response.json.return_value = DIRECTORY_V2.to_json()
    orderr = mock.MagicMock()
    deadline = mock.MagicMock()
    with mock.patch('acme.client.ClientV2') as mock_client:
        self._init().finalize_order(orderr, deadline)
        mock_client().finalize_order.assert_called_once_with(orderr, deadline)
def test_revoke(self):
    """revoke is forwarded to the version-appropriate wrapped client."""
    for directory, patch_target in ((DIRECTORY_V1, 'acme.client.Client'),
                                    (DIRECTORY_V2, 'acme.client.ClientV2')):
        self.response.json.return_value = directory.to_json()
        with mock.patch(patch_target) as mock_client:
            self._init().revoke(messages_test.CERT, self.rsn)
            mock_client().revoke.assert_called_once_with(
                messages_test.CERT, self.rsn)
def test_update_registration(self):
    """update_registration is forwarded to the wrapped v1 client."""
    self.response.json.return_value = DIRECTORY_V1.to_json()
    with mock.patch('acme.client.Client') as mock_client:
        self._init().update_registration(mock.sentinel.regr, None)
        mock_client().update_registration.assert_called_once_with(
            mock.sentinel.regr, None)
class ClientTest(ClientTestBase):
    """Tests for acme.client.Client (ACME v1 protocol client)."""
    # pylint: disable=too-many-instance-attributes,too-many-public-methods

    def setUp(self):
        super(ClientTest, self).setUp()
        self.directory = DIRECTORY_V1
        # Registration
        self.regr = self.regr.update(
            terms_of_service='https://www.letsencrypt-demo.org/tos')
        # Request issuance
        self.certr = messages.CertificateResource(
            body=messages_test.CERT, authzrs=(self.authzr,),
            uri='https://www.letsencrypt-demo.org/acme/cert/1',
            cert_chain_uri='https://www.letsencrypt-demo.org/ca')
        from acme.client import Client
        self.client = Client(
            directory=self.directory, key=KEY, alg=jose.RS256, net=self.net)

    def test_init_downloads_directory(self):
        """Passing a URL as the directory triggers a download of it."""
        uri = 'http://www.letsencrypt-demo.org/directory'
        from acme.client import Client
        self.client = Client(
            directory=uri, key=KEY, alg=jose.RS256, net=self.net)
        self.net.get.assert_called_once_with(uri)

    @mock.patch('acme.client.ClientNetwork')
    def test_init_without_net(self, mock_net):
        """Omitting net creates a ClientNetwork from the key and alg."""
        mock_net.return_value = mock.sentinel.net
        alg = jose.RS256
        from acme.client import Client
        self.client = Client(
            directory=self.directory, key=KEY, alg=alg)
        # Fixed: `called_once_with` is NOT an assertion on a Mock -- it
        # silently creates a child mock and always "passes".
        mock_net.assert_called_once_with(KEY, alg=alg, verify_ssl=True)
        self.assertEqual(self.client.net, mock.sentinel.net)

    def test_register(self):
        """register returns the RegistrationResource built from the reply."""
        # "Instance of 'Field' has no to_json/update member" bug:
        # pylint: disable=no-member
        self.response.status_code = http_client.CREATED
        self.response.json.return_value = self.regr.body.to_json()
        self.response.headers['Location'] = self.regr.uri
        self.response.links.update({
            'terms-of-service': {'url': self.regr.terms_of_service},
        })
        self.assertEqual(self.regr, self.client.register(self.new_reg))
        # TODO: test POST call arguments

    def test_update_registration(self):
        """update_registration round-trips the registration resource."""
        # "Instance of 'Field' has no to_json/update member" bug:
        # pylint: disable=no-member
        self.response.headers['Location'] = self.regr.uri
        self.response.json.return_value = self.regr.body.to_json()
        self.assertEqual(self.regr, self.client.update_registration(self.regr))
        # TODO: test POST call arguments
        # TODO: split here and separate test
        self.response.json.return_value = self.regr.body.update(
            contact=()).to_json()

    def test_deactivate_account(self):
        """deactivate_registration returns the updated resource."""
        self.response.headers['Location'] = self.regr.uri
        self.response.json.return_value = self.regr.body.to_json()
        self.assertEqual(self.regr,
                         self.client.deactivate_registration(self.regr))

    def test_query_registration(self):
        self.response.json.return_value = self.regr.body.to_json()
        self.assertEqual(self.regr, self.client.query_registration(self.regr))

    def test_agree_to_tos(self):
        """agree_to_tos updates the regr with the ToS as the agreement."""
        self.client.update_registration = mock.Mock()
        self.client.agree_to_tos(self.regr)
        regr = self.client.update_registration.call_args[0][0]
        self.assertEqual(self.regr.terms_of_service, regr.body.agreement)

    def _prepare_response_for_request_challenges(self):
        """Arrange a CREATED response carrying the canned authorization."""
        self.response.status_code = http_client.CREATED
        self.response.headers['Location'] = self.authzr.uri
        self.response.json.return_value = self.authz.to_json()

    def test_request_challenges(self):
        self._prepare_response_for_request_challenges()
        self.client.request_challenges(self.identifier)
        self.net.post.assert_called_once_with(
            self.directory.new_authz,
            messages.NewAuthorization(identifier=self.identifier),
            acme_version=1)

    def test_request_challenges_deprecated_arg(self):
        """The deprecated new_authzr_uri argument must be ignored."""
        self._prepare_response_for_request_challenges()
        self.client.request_challenges(self.identifier, new_authzr_uri="hi")
        self.net.post.assert_called_once_with(
            self.directory.new_authz,
            messages.NewAuthorization(identifier=self.identifier),
            acme_version=1)

    def test_request_challenges_custom_uri(self):
        self._prepare_response_for_request_challenges()
        self.client.request_challenges(self.identifier)
        self.net.post.assert_called_once_with(
            'https://www.letsencrypt-demo.org/acme/new-authz', mock.ANY,
            acme_version=1)

    def test_request_challenges_unexpected_update(self):
        """A server reply for a different identifier is rejected."""
        self._prepare_response_for_request_challenges()
        self.response.json.return_value = self.authz.update(
            identifier=self.identifier.update(value='foo')).to_json()
        self.assertRaises(
            errors.UnexpectedUpdate, self.client.request_challenges,
            self.identifier)

    def test_request_challenges_wildcard(self):
        """Wildcard identifiers are unsupported under ACME v1."""
        wildcard_identifier = messages.Identifier(
            typ=messages.IDENTIFIER_FQDN, value='*.example.org')
        self.assertRaises(
            errors.WildcardUnsupportedError, self.client.request_challenges,
            wildcard_identifier)

    def test_request_domain_challenges(self):
        """request_domain_challenges wraps the name in an Identifier."""
        self.client.request_challenges = mock.MagicMock()
        self.assertEqual(
            self.client.request_challenges(self.identifier),
            self.client.request_domain_challenges('example.com'))

    def test_answer_challenge(self):
        self.response.links['up'] = {'url': self.challr.authzr_uri}
        self.response.json.return_value = self.challr.body.to_json()
        chall_response = challenges.DNSResponse(validation=None)
        self.client.answer_challenge(self.challr.body, chall_response)
        # TODO: split here and separate test
        self.assertRaises(errors.UnexpectedUpdate, self.client.answer_challenge,
                          self.challr.body.update(uri='foo'), chall_response)

    def test_answer_challenge_missing_next(self):
        """A reply with no 'up' link is a client error."""
        self.assertRaises(
            errors.ClientError, self.client.answer_challenge,
            self.challr.body, challenges.DNSResponse(validation=None))

    def test_retry_after_date(self):
        self.response.headers['Retry-After'] = 'Fri, 31 Dec 1999 23:59:59 GMT'
        self.assertEqual(
            datetime.datetime(1999, 12, 31, 23, 59, 59),
            self.client.retry_after(response=self.response, default=10))

    @mock.patch('acme.client.datetime')
    def test_retry_after_invalid(self, dt_mock):
        """Unparseable Retry-After falls back to now + default."""
        dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
        dt_mock.timedelta = datetime.timedelta
        self.response.headers['Retry-After'] = 'foooo'
        self.assertEqual(
            datetime.datetime(2015, 3, 27, 0, 0, 10),
            self.client.retry_after(response=self.response, default=10))

    @mock.patch('acme.client.datetime')
    def test_retry_after_overflow(self, dt_mock):
        """An out-of-range HTTP date falls back to now + default."""
        dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
        dt_mock.timedelta = datetime.timedelta
        dt_mock.datetime.side_effect = datetime.datetime
        self.response.headers['Retry-After'] = "Tue, 116 Feb 2016 11:50:00 MST"
        self.assertEqual(
            datetime.datetime(2015, 3, 27, 0, 0, 10),
            self.client.retry_after(response=self.response, default=10))

    @mock.patch('acme.client.datetime')
    def test_retry_after_seconds(self, dt_mock):
        """An integral Retry-After is interpreted as seconds from now."""
        dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
        dt_mock.timedelta = datetime.timedelta
        self.response.headers['Retry-After'] = '50'
        self.assertEqual(
            datetime.datetime(2015, 3, 27, 0, 0, 50),
            self.client.retry_after(response=self.response, default=10))

    @mock.patch('acme.client.datetime')
    def test_retry_after_missing(self, dt_mock):
        dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
        dt_mock.timedelta = datetime.timedelta
        self.assertEqual(
            datetime.datetime(2015, 3, 27, 0, 0, 10),
            self.client.retry_after(response=self.response, default=10))

    def test_poll(self):
        self.response.json.return_value = self.authzr.body.to_json()
        self.assertEqual((self.authzr, self.response),
                         self.client.poll(self.authzr))
        # TODO: split here and separate test
        self.response.json.return_value = self.authz.update(
            identifier=self.identifier.update(value='foo')).to_json()
        self.assertRaises(
            errors.UnexpectedUpdate, self.client.poll, self.authzr)

    def test_request_issuance(self):
        self.response.content = CERT_DER
        self.response.headers['Location'] = self.certr.uri
        self.response.links['up'] = {'url': self.certr.cert_chain_uri}
        self.assertEqual(self.certr, self.client.request_issuance(
            messages_test.CSR, (self.authzr,)))
        # TODO: check POST args

    def test_request_issuance_missing_up(self):
        """Without an 'up' link the chain URI is left unset."""
        self.response.content = CERT_DER
        self.response.headers['Location'] = self.certr.uri
        self.assertEqual(
            self.certr.update(cert_chain_uri=None),
            self.client.request_issuance(messages_test.CSR, (self.authzr,)))

    def test_request_issuance_missing_location(self):
        self.assertRaises(
            errors.ClientError, self.client.request_issuance,
            messages_test.CSR, (self.authzr,))

    @mock.patch('acme.client.datetime')
    @mock.patch('acme.client.time')
    def test_poll_and_request_issuance(self, time_mock, dt_mock):
        """Simulate interleaved polling of two authorizations on a fake
        clock, then issuance once all are valid."""
        # clock.dt | pylint: disable=no-member
        clock = mock.MagicMock(dt=datetime.datetime(2015, 3, 27))
        def sleep(seconds):
            """increment clock"""
            clock.dt += datetime.timedelta(seconds=seconds)
        time_mock.sleep.side_effect = sleep
        def now():
            """return current clock value"""
            return clock.dt
        dt_mock.datetime.now.side_effect = now
        dt_mock.timedelta = datetime.timedelta
        def poll(authzr):  # pylint: disable=missing-docstring
            # record poll start time based on the current clock value
            authzr.times.append(clock.dt)
            # suppose it takes 2 seconds for server to produce the
            # result, increment clock
            clock.dt += datetime.timedelta(seconds=2)
            if len(authzr.retries) == 1:  # no more retries
                done = mock.MagicMock(uri=authzr.uri, times=authzr.times)
                done.body.status = authzr.retries[0]
                return done, []
            # response (2nd result tuple element) is reduced to only
            # Retry-After header contents represented as integer
            # seconds; authzr.retries is a list of Retry-After
            # headers, head(retries) is peeled of as a current
            # Retry-After header, and tail(retries) is persisted for
            # later poll() calls
            return (mock.MagicMock(retries=authzr.retries[1:],
                                   uri=authzr.uri + '.', times=authzr.times),
                    authzr.retries[0])
        self.client.poll = mock.MagicMock(side_effect=poll)
        mintime = 7
        def retry_after(response, default):
            # pylint: disable=missing-docstring
            # check that poll_and_request_issuance correctly passes mintime
            self.assertEqual(default, mintime)
            return clock.dt + datetime.timedelta(seconds=response)
        self.client.retry_after = mock.MagicMock(side_effect=retry_after)
        def request_issuance(csr, authzrs):  # pylint: disable=missing-docstring
            return csr, authzrs
        self.client.request_issuance = mock.MagicMock(
            side_effect=request_issuance)
        csr = mock.MagicMock()
        authzrs = (
            mock.MagicMock(uri='a', times=[], retries=(
                8, 20, 30, messages.STATUS_VALID)),
            mock.MagicMock(uri='b', times=[], retries=(
                5, messages.STATUS_VALID)),
        )
        cert, updated_authzrs = self.client.poll_and_request_issuance(
            csr, authzrs, mintime=mintime,
            # make sure that max_attempts is per-authorization, rather
            # than global
            max_attempts=max(len(authzrs[0].retries), len(authzrs[1].retries)))
        self.assertIs(cert[0], csr)
        self.assertIs(cert[1], updated_authzrs)
        self.assertEqual(updated_authzrs[0].uri, 'a...')
        self.assertEqual(updated_authzrs[1].uri, 'b.')
        self.assertEqual(updated_authzrs[0].times, [
            datetime.datetime(2015, 3, 27),
            # a is scheduled for 10, but b is polling [9..11), so it
            # will be picked up as soon as b is finished, without
            # additional sleeping
            datetime.datetime(2015, 3, 27, 0, 0, 11),
            datetime.datetime(2015, 3, 27, 0, 0, 33),
            datetime.datetime(2015, 3, 27, 0, 1, 5),
        ])
        self.assertEqual(updated_authzrs[1].times, [
            datetime.datetime(2015, 3, 27, 0, 0, 2),
            datetime.datetime(2015, 3, 27, 0, 0, 9),
        ])
        self.assertEqual(clock.dt, datetime.datetime(2015, 3, 27, 0, 1, 7))
        # CA sets invalid | TODO: move to a separate test
        invalid_authzr = mock.MagicMock(
            times=[], retries=[messages.STATUS_INVALID])
        self.assertRaises(
            errors.PollError, self.client.poll_and_request_issuance,
            csr, authzrs=(invalid_authzr,), mintime=mintime)
        # exceeded max_attempts | TODO: move to a separate test
        self.assertRaises(
            errors.PollError, self.client.poll_and_request_issuance,
            csr, authzrs, mintime=mintime, max_attempts=2)

    def test_check_cert(self):
        self.response.headers['Location'] = self.certr.uri
        self.response.content = CERT_DER
        self.assertEqual(self.certr.update(body=messages_test.CERT),
                         self.client.check_cert(self.certr))
        # TODO: split here and separate test
        self.response.headers['Location'] = 'foo'
        self.assertRaises(
            errors.UnexpectedUpdate, self.client.check_cert, self.certr)

    def test_check_cert_missing_location(self):
        self.response.content = CERT_DER
        self.assertRaises(
            errors.ClientError, self.client.check_cert, self.certr)

    def test_refresh(self):
        """refresh is an alias for check_cert."""
        self.client.check_cert = mock.MagicMock()
        self.assertEqual(
            self.client.check_cert(self.certr), self.client.refresh(self.certr))

    def test_fetch_chain_no_up_link(self):
        self.assertEqual([], self.client.fetch_chain(self.certr.update(
            cert_chain_uri=None)))

    def test_fetch_chain_single(self):
        # pylint: disable=protected-access
        self.client._get_cert = mock.MagicMock()
        self.client._get_cert.return_value = (
            mock.MagicMock(links={}), "certificate")
        self.assertEqual([self.client._get_cert(self.certr.cert_chain_uri)[1]],
                         self.client.fetch_chain(self.certr))

    def test_fetch_chain_max(self):
        """A chain exactly at max_length is fetched in full."""
        # pylint: disable=protected-access
        up_response = mock.MagicMock(links={'up': {'url': 'http://cert'}})
        noup_response = mock.MagicMock(links={})
        self.client._get_cert = mock.MagicMock()
        self.client._get_cert.side_effect = [
            (up_response, "cert")] * 9 + [(noup_response, "last_cert")]
        chain = self.client.fetch_chain(self.certr, max_length=10)
        self.assertEqual(chain, ["cert"] * 9 + ["last_cert"])

    def test_fetch_chain_too_many(self):  # recursive
        """An endless 'up' chain must raise instead of looping forever."""
        # pylint: disable=protected-access
        response = mock.MagicMock(links={'up': {'url': 'http://cert'}})
        self.client._get_cert = mock.MagicMock()
        self.client._get_cert.return_value = (response, "certificate")
        self.assertRaises(errors.Error, self.client.fetch_chain, self.certr)

    def test_revoke(self):
        self.client.revoke(self.certr.body, self.rsn)
        self.net.post.assert_called_once_with(
            self.directory[messages.Revocation], mock.ANY, acme_version=1)

    def test_revocation_payload(self):
        obj = messages.Revocation(certificate=self.certr.body, reason=self.rsn)
        self.assertIn('reason', obj.to_partial_json())
        # Fixed: assertEquals is a deprecated alias (removed in Py3.12).
        self.assertEqual(self.rsn, obj.to_partial_json()['reason'])

    def test_revoke_bad_status_raises_error(self):
        self.response.status_code = http_client.METHOD_NOT_ALLOWED
        self.assertRaises(
            errors.ClientError,
            self.client.revoke,
            self.certr,
            self.rsn)
class ClientV2Test(ClientTestBase):
    """Tests for acme.client.ClientV2 (ACME v2 / RFC 8555 client)."""

    def setUp(self):
        super(ClientV2Test, self).setUp()
        self.directory = DIRECTORY_V2
        from acme.client import ClientV2
        self.client = ClientV2(self.directory, self.net)
        self.new_reg = self.new_reg.update(terms_of_service_agreed=True)
        # A second, pending authorization for a SAN order.
        self.authzr_uri2 = 'https://www.letsencrypt-demo.org/acme/authz/2'
        self.authz2 = self.authz.update(identifier=messages.Identifier(
            typ=messages.IDENTIFIER_FQDN, value='www.example.com'),
            status=messages.STATUS_PENDING)
        self.authzr2 = messages.AuthorizationResource(
            body=self.authz2, uri=self.authzr_uri2)
        self.order = messages.Order(
            identifiers=(self.authz.identifier, self.authz2.identifier),
            status=messages.STATUS_PENDING,
            authorizations=(self.authzr.uri, self.authzr_uri2),
            finalize='https://www.letsencrypt-demo.org/acme/acct/1/order/1/finalize')
        self.orderr = messages.OrderResource(
            body=self.order,
            uri='https://www.letsencrypt-demo.org/acme/acct/1/order/1',
            authorizations=[self.authzr, self.authzr2], csr_pem=CSR_SAN_PEM)

    def test_new_account(self):
        self.response.status_code = http_client.CREATED
        self.response.json.return_value = self.regr.body.to_json()
        self.response.headers['Location'] = self.regr.uri
        self.assertEqual(self.regr, self.client.new_account(self.new_reg))

    def test_new_account_conflict(self):
        """HTTP 200 (account already exists) raises ConflictError."""
        self.response.status_code = http_client.OK
        self.response.headers['Location'] = self.regr.uri
        self.assertRaises(errors.ConflictError, self.client.new_account, self.new_reg)

    def test_new_order(self):
        """new_order POSTs the order and GETs each authorization."""
        order_response = copy.deepcopy(self.response)
        order_response.status_code = http_client.CREATED
        order_response.json.return_value = self.order.to_json()
        order_response.headers['Location'] = self.orderr.uri
        self.net.post.return_value = order_response
        authz_response = copy.deepcopy(self.response)
        authz_response.json.return_value = self.authz.to_json()
        authz_response.headers['Location'] = self.authzr.uri
        authz_response2 = self.response
        authz_response2.json.return_value = self.authz2.to_json()
        authz_response2.headers['Location'] = self.authzr2.uri
        self.net.get.side_effect = (authz_response, authz_response2)
        self.assertEqual(self.client.new_order(CSR_SAN_PEM), self.orderr)

    @mock.patch('acme.client.datetime')
    def test_poll_and_finalize(self, mock_datetime):
        """poll_and_finalize derives a 90s deadline and delegates."""
        mock_datetime.datetime.now.return_value = datetime.datetime(2018, 2, 15)
        mock_datetime.timedelta = datetime.timedelta
        expected_deadline = mock_datetime.datetime.now() + datetime.timedelta(seconds=90)
        self.client.poll_authorizations = mock.Mock(return_value=self.orderr)
        self.client.finalize_order = mock.Mock(return_value=self.orderr)
        self.assertEqual(self.client.poll_and_finalize(self.orderr), self.orderr)
        self.client.poll_authorizations.assert_called_once_with(self.orderr, expected_deadline)
        self.client.finalize_order.assert_called_once_with(self.orderr, expected_deadline)

    @mock.patch('acme.client.datetime')
    def test_poll_authorizations_timeout(self, mock_datetime):
        """Authorizations still pending past the deadline raise TimeoutError."""
        now_side_effect = [datetime.datetime(2018, 2, 15),
                           datetime.datetime(2018, 2, 16),
                           datetime.datetime(2018, 2, 17)]
        mock_datetime.datetime.now.side_effect = now_side_effect
        self.response.json.side_effect = [
            self.authz.to_json(), self.authz2.to_json(), self.authz2.to_json()]
        self.assertRaises(
            errors.TimeoutError, self.client.poll_authorizations, self.orderr, now_side_effect[1])

    def test_poll_authorizations_failure(self):
        """An invalid authorization with a challenge error raises ValidationError."""
        deadline = datetime.datetime(9999, 9, 9)
        challb = self.challr.body.update(status=messages.STATUS_INVALID,
                                         error=messages.Error.with_code('unauthorized'))
        authz = self.authz.update(status=messages.STATUS_INVALID, challenges=(challb,))
        self.response.json.return_value = authz.to_json()
        self.assertRaises(
            errors.ValidationError, self.client.poll_authorizations, self.orderr, deadline)

    def test_poll_authorizations_success(self):
        deadline = datetime.datetime(9999, 9, 9)
        updated_authz2 = self.authz2.update(status=messages.STATUS_VALID)
        updated_authzr2 = messages.AuthorizationResource(
            body=updated_authz2, uri=self.authzr_uri2)
        updated_orderr = self.orderr.update(authorizations=[self.authzr, updated_authzr2])
        self.response.json.side_effect = (
            self.authz.to_json(), self.authz2.to_json(), updated_authz2.to_json())
        self.assertEqual(self.client.poll_authorizations(self.orderr, deadline), updated_orderr)

    def test_finalize_order_success(self):
        updated_order = self.order.update(
            certificate='https://www.letsencrypt-demo.org/acme/cert/')
        updated_orderr = self.orderr.update(body=updated_order, fullchain_pem=CERT_SAN_PEM)
        self.response.json.return_value = updated_order.to_json()
        self.response.text = CERT_SAN_PEM
        deadline = datetime.datetime(9999, 9, 9)
        self.assertEqual(self.client.finalize_order(self.orderr, deadline), updated_orderr)

    def test_finalize_order_error(self):
        updated_order = self.order.update(error=messages.Error.with_code('unauthorized'))
        self.response.json.return_value = updated_order.to_json()
        deadline = datetime.datetime(9999, 9, 9)
        self.assertRaises(errors.IssuanceError, self.client.finalize_order, self.orderr, deadline)

    def test_finalize_order_timeout(self):
        deadline = datetime.datetime.now() - datetime.timedelta(seconds=60)
        self.assertRaises(errors.TimeoutError, self.client.finalize_order, self.orderr, deadline)

    def test_revoke(self):
        self.client.revoke(messages_test.CERT, self.rsn)
        self.net.post.assert_called_once_with(
            self.directory["revokeCert"], mock.ANY, acme_version=2)

    def test_update_registration(self):
        """update_registration first resolves the account (newAccount POST),
        then POSTs the update -- two POSTs in total."""
        # "Instance of 'Field' has no to_json/update member" bug:
        # pylint: disable=no-member
        self.response.headers['Location'] = self.regr.uri
        self.response.json.return_value = self.regr.body.to_json()
        self.assertEqual(self.regr, self.client.update_registration(self.regr))
        self.assertNotEqual(self.client.net.account, None)
        self.assertEqual(self.client.net.post.call_count, 2)
        # Fixed: assertIn gives a useful failure message vs assertTrue(in).
        self.assertIn(DIRECTORY_V2.newAccount, self.net.post.call_args_list[0][0])
        self.response.json.return_value = self.regr.body.update(
            contact=()).to_json()
class MockJSONDeSerializable(jose.JSONDeSerializable):
    """Trivial JSONDeSerializable stub wrapping a single value."""

    def __init__(self, value):
        self.value = value

    def to_partial_json(self):
        """Serialize as ``{'foo': value}``."""
        return {'foo': self.value}

    @classmethod
    def from_json(cls, value):
        pass  # pragma: no cover
class ClientNetworkTest(unittest.TestCase):
    """Tests for acme.client.ClientNetwork."""
    # pylint: disable=too-many-public-methods

    def setUp(self):
        self.verify_ssl = mock.MagicMock()
        self.wrap_in_jws = mock.MagicMock(return_value=mock.sentinel.wrapped)
        from acme.client import ClientNetwork
        self.net = ClientNetwork(
            key=KEY, alg=jose.RS256, verify_ssl=self.verify_ssl,
            user_agent='acme-python-test')
        self.response = mock.MagicMock(ok=True, status_code=http_client.OK)
        self.response.headers = {}
        self.response.links = {}

    def test_init(self):
        self.assertTrue(self.net.verify_ssl is self.verify_ssl)

    def test_wrap_in_jws(self):
        """_wrap_in_jws (v1) produces a JWS carrying payload and nonce."""
        # pylint: disable=protected-access
        jws_dump = self.net._wrap_in_jws(
            MockJSONDeSerializable('foo'), nonce=b'Tg', url="url",
            acme_version=1)
        jws = acme_jws.JWS.json_loads(jws_dump)
        self.assertEqual(json.loads(jws.payload.decode()), {'foo': 'foo'})
        self.assertEqual(jws.signature.combined.nonce, b'Tg')

    def test_wrap_in_jws_v2(self):
        """_wrap_in_jws (v2) additionally embeds the account kid and url."""
        self.net.account = {'uri': 'acct-uri'}
        # pylint: disable=protected-access
        jws_dump = self.net._wrap_in_jws(
            MockJSONDeSerializable('foo'), nonce=b'Tg', url="url",
            acme_version=2)
        jws = acme_jws.JWS.json_loads(jws_dump)
        self.assertEqual(json.loads(jws.payload.decode()), {'foo': 'foo'})
        self.assertEqual(jws.signature.combined.nonce, b'Tg')
        self.assertEqual(jws.signature.combined.kid, u'acct-uri')
        self.assertEqual(jws.signature.combined.url, u'url')

    def test_check_response_not_ok_jobj_no_error(self):
        """A non-OK JSON body that is not a valid Error => ClientError."""
        self.response.ok = False
        self.response.json.return_value = {}
        with mock.patch('acme.client.messages.Error.from_json') as from_json:
            from_json.side_effect = jose.DeserializationError
            # pylint: disable=protected-access
            self.assertRaises(
                errors.ClientError, self.net._check_response, self.response)

    def test_check_response_not_ok_jobj_error(self):
        """A non-OK body that parses as an ACME Error is raised as such."""
        self.response.ok = False
        self.response.json.return_value = messages.Error(
            detail='foo', typ='serverInternal', title='some title').to_json()
        # pylint: disable=protected-access
        self.assertRaises(
            messages.Error, self.net._check_response, self.response)

    def test_check_response_not_ok_no_jobj(self):
        self.response.ok = False
        self.response.json.side_effect = ValueError
        # pylint: disable=protected-access
        self.assertRaises(
            errors.ClientError, self.net._check_response, self.response)

    def test_check_response_ok_no_jobj_ct_required(self):
        """OK but unparseable body fails whenever JSON was required."""
        self.response.json.side_effect = ValueError
        for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']:
            self.response.headers['Content-Type'] = response_ct
            # pylint: disable=protected-access
            self.assertRaises(
                errors.ClientError, self.net._check_response, self.response,
                content_type=self.net.JSON_CONTENT_TYPE)

    def test_check_response_ok_no_jobj_no_ct(self):
        """OK with unparseable body passes when no content type is required."""
        self.response.json.side_effect = ValueError
        for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']:
            self.response.headers['Content-Type'] = response_ct
            # pylint: disable=protected-access,no-value-for-parameter
            self.assertEqual(
                self.response, self.net._check_response(self.response))

    def test_check_response_conflict(self):
        self.response.ok = False
        self.response.status_code = 409
        # pylint: disable=protected-access
        self.assertRaises(errors.ConflictError, self.net._check_response, self.response)

    def test_check_response_jobj(self):
        self.response.json.return_value = {}
        for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']:
            self.response.headers['Content-Type'] = response_ct
            # pylint: disable=protected-access,no-value-for-parameter
            self.assertEqual(
                self.response, self.net._check_response(self.response))

    def test_send_request(self):
        self.net.session = mock.MagicMock()
        self.net.session.request.return_value = self.response
        # pylint: disable=protected-access
        self.assertEqual(self.response, self.net._send_request(
            'HEAD', 'http://example.com/', 'foo', bar='baz'))
        self.net.session.request.assert_called_once_with(
            'HEAD', 'http://example.com/', 'foo',
            headers=mock.ANY, verify=mock.ANY, timeout=mock.ANY, bar='baz')

    @mock.patch('acme.client.logger')
    def test_send_request_get_der(self, mock_logger):
        """Binary (DER) response bodies are logged base64-encoded."""
        self.net.session = mock.MagicMock()
        self.net.session.request.return_value = mock.MagicMock(
            ok=True, status_code=http_client.OK,
            headers={"Content-Type": "application/pkix-cert"},
            content=b"hi")
        # pylint: disable=protected-access
        self.net._send_request('HEAD', 'http://example.com/', 'foo',
                               timeout=mock.ANY, bar='baz')
        mock_logger.debug.assert_called_with(
            'Received response:\nHTTP %d\n%s\n\n%s', 200,
            'Content-Type: application/pkix-cert', b'aGk=')

    def test_send_request_post(self):
        self.net.session = mock.MagicMock()
        self.net.session.request.return_value = self.response
        # pylint: disable=protected-access
        self.assertEqual(self.response, self.net._send_request(
            'POST', 'http://example.com/', 'foo', data='qux', bar='baz'))
        self.net.session.request.assert_called_once_with(
            'POST', 'http://example.com/', 'foo',
            headers=mock.ANY, verify=mock.ANY, timeout=mock.ANY, data='qux', bar='baz')

    def test_send_request_verify_ssl(self):
        """The verify_ssl flag is propagated verbatim to requests."""
        # pylint: disable=protected-access
        for verify in True, False:
            self.net.session = mock.MagicMock()
            self.net.session.request.return_value = self.response
            self.net.verify_ssl = verify
            # pylint: disable=protected-access
            self.assertEqual(
                self.response,
                self.net._send_request('GET', 'http://example.com/'))
            self.net.session.request.assert_called_once_with(
                'GET', 'http://example.com/', verify=verify,
                timeout=mock.ANY, headers=mock.ANY)

    def test_send_request_user_agent(self):
        """User-Agent defaults to the configured one but can be overridden."""
        self.net.session = mock.MagicMock()
        # pylint: disable=protected-access
        self.net._send_request('GET', 'http://example.com/',
                               headers={'bar': 'baz'})
        self.net.session.request.assert_called_once_with(
            'GET', 'http://example.com/', verify=mock.ANY,
            timeout=mock.ANY,
            headers={'User-Agent': 'acme-python-test', 'bar': 'baz'})
        self.net._send_request('GET', 'http://example.com/',
                               headers={'User-Agent': 'foo2'})
        self.net.session.request.assert_called_with(
            'GET', 'http://example.com/',
            verify=mock.ANY, timeout=mock.ANY, headers={'User-Agent': 'foo2'})

    def test_send_request_timeout(self):
        # NOTE(review): 45 appears to be ClientNetwork's default request
        # timeout -- confirm against acme.client if it changes.
        self.net.session = mock.MagicMock()
        # pylint: disable=protected-access
        self.net._send_request('GET', 'http://example.com/',
                               headers={'bar': 'baz'})
        self.net.session.request.assert_called_once_with(
            mock.ANY, mock.ANY, verify=mock.ANY, headers=mock.ANY,
            timeout=45)

    def test_del(self, close_exception=None):
        """Deleting the network closes its session (errors tolerated)."""
        sess = mock.MagicMock()
        if close_exception is not None:
            sess.close.side_effect = close_exception
        self.net.session = sess
        del self.net
        sess.close.assert_called_once_with()

    def test_del_error(self):
        self.test_del(ReferenceError)

    @mock.patch('acme.client.requests')
    def test_requests_error_passthrough(self, mock_requests):
        mock_requests.exceptions = requests.exceptions
        mock_requests.request.side_effect = requests.exceptions.RequestException
        # pylint: disable=protected-access
        self.assertRaises(requests.exceptions.RequestException,
                          self.net._send_request, 'GET', 'uri')

    def test_urllib_error(self):
        # Using a connection error to test a properly formatted error message
        # NOTE(review): this performs a real connection attempt to a local
        # port expected to be closed; it needs no external network.
        try:
            # pylint: disable=protected-access
            self.net._send_request('GET', "http://localhost:19123/nonexistent.txt")
        # Value Error Generated Exceptions
        except ValueError as y:
            self.assertEqual("Requesting localhost/nonexistent: "
                             "Connection refused", str(y))
        # Requests Library Exceptions
        except requests.exceptions.ConnectionError as z:  # pragma: no cover
            self.assertTrue("('Connection aborted.', error(111, 'Connection refused'))"
                            == str(z) or "[WinError 10061]" in str(z))
class ClientNetworkWithMockedResponseTest(unittest.TestCase):
    """Tests for acme.client.ClientNetwork which mock out response."""
    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        """Install mocked _send_request/_check_response/_wrap_in_jws.

        The fake send_request pops b64 nonces from self.available_nonces
        into the replay-nonce header, simulating an ACME server that hands
        out a fresh nonce with each response.
        """
        from acme.client import ClientNetwork
        self.net = ClientNetwork(key=None, alg=None)
        self.response = mock.MagicMock(ok=True, status_code=http_client.OK)
        self.response.headers = {}
        self.response.links = {}
        self.checked_response = mock.MagicMock()
        self.obj = mock.MagicMock()
        self.wrapped_obj = mock.MagicMock()
        self.content_type = mock.sentinel.content_type
        # Nonces are stored base64-encoded, as they appear on the wire.
        self.all_nonces = [
            jose.b64encode(b'Nonce'),
            jose.b64encode(b'Nonce2'), jose.b64encode(b'Nonce3')]
        self.available_nonces = self.all_nonces[:]
        def send_request(*args, **kwargs):
            # pylint: disable=unused-argument,missing-docstring
            # Each simulated request consumes one nonce (popped from the
            # END of available_nonces) and exposes it via the replay header.
            if self.available_nonces:
                self.response.headers = {
                    self.net.REPLAY_NONCE_HEADER:
                        self.available_nonces.pop().decode()}
            else:
                self.response.headers = {}
            return self.response
        # pylint: disable=protected-access
        self.net._send_request = self.send_request = mock.MagicMock(
            side_effect=send_request)
        self.net._check_response = self.check_response
        self.net._wrap_in_jws = mock.MagicMock(return_value=self.wrapped_obj)
    def check_response(self, response, content_type):
        # pylint: disable=missing-docstring
        # Stand-in for ClientNetwork._check_response: asserts it received
        # the mocked response/content type, then returns the sentinel.
        self.assertEqual(self.response, response)
        self.assertEqual(self.content_type, content_type)
        return self.checked_response
    def test_head(self):
        """head() passes through to _send_request without response checking."""
        self.assertEqual(self.response, self.net.head(
            'http://example.com/', 'foo', bar='baz'))
        self.send_request.assert_called_once_with(
            'HEAD', 'http://example.com/', 'foo', bar='baz')
    def test_get(self):
        """get() sends GET and runs the result through _check_response."""
        self.assertEqual(self.checked_response, self.net.get(
            'http://example.com/', content_type=self.content_type, bar='baz'))
        self.send_request.assert_called_once_with(
            'GET', 'http://example.com/', bar='baz')
    def test_post_no_content_type(self):
        """post() defaults to the JOSE content type when none is given."""
        self.content_type = self.net.JOSE_CONTENT_TYPE
        self.assertEqual(self.checked_response, self.net.post('uri', self.obj))
    def test_post(self):
        """post() wraps the payload in JWS using a fresh nonce; with no
        nonces left, MissingNonce is raised."""
        # pylint: disable=protected-access
        self.assertEqual(self.checked_response, self.net.post(
            'uri', self.obj, content_type=self.content_type))
        self.net._wrap_in_jws.assert_called_once_with(
            self.obj, jose.b64decode(self.all_nonces.pop()), "uri", 1)
        self.available_nonces = []
        self.assertRaises(errors.MissingNonce, self.net.post,
                          'uri', self.obj, content_type=self.content_type)
        self.net._wrap_in_jws.assert_called_with(
            self.obj, jose.b64decode(self.all_nonces.pop()), "uri", 1)
    def test_post_wrong_initial_nonce(self): # HEAD
        """A malformed nonce from the initial HEAD request raises BadNonce."""
        self.available_nonces = [b'f', jose.b64encode(b'good')]
        self.assertRaises(errors.BadNonce, self.net.post, 'uri',
                          self.obj, content_type=self.content_type)
    def test_post_wrong_post_response_nonce(self):
        """A malformed nonce in the POST response raises BadNonce."""
        self.available_nonces = [jose.b64encode(b'good'), b'f']
        self.assertRaises(errors.BadNonce, self.net.post, 'uri',
                          self.obj, content_type=self.content_type)
    def test_post_failed_retry(self):
        """badNonce on every attempt eventually surfaces as an error."""
        check_response = mock.MagicMock()
        check_response.side_effect = messages.Error.with_code('badNonce')
        # pylint: disable=protected-access
        self.net._check_response = check_response
        self.assertRaises(messages.Error, self.net.post, 'uri',
                          self.obj, content_type=self.content_type)
    def test_post_not_retried(self):
        """Non-badNonce errors (e.g. 'malformed') are not retried."""
        check_response = mock.MagicMock()
        check_response.side_effect = [messages.Error.with_code('malformed'),
                                      self.checked_response]
        # pylint: disable=protected-access
        self.net._check_response = check_response
        self.assertRaises(messages.Error, self.net.post, 'uri',
                          self.obj, content_type=self.content_type)
    def test_post_successful_retry(self):
        """A single badNonce is retried transparently and then succeeds."""
        check_response = mock.MagicMock()
        check_response.side_effect = [messages.Error.with_code('badNonce'),
                                      self.checked_response]
        # pylint: disable=protected-access
        self.net._check_response = check_response
        self.assertEqual(self.checked_response, self.net.post(
            'uri', self.obj, content_type=self.content_type))
    def test_head_get_post_error_passthrough(self):
        """Transport-level exceptions propagate unchanged from head/get/post."""
        self.send_request.side_effect = requests.exceptions.RequestException
        for method in self.net.head, self.net.get:
            self.assertRaises(
                requests.exceptions.RequestException, method, 'GET', 'uri')
        self.assertRaises(requests.exceptions.RequestException,
                          self.net.post, 'uri', obj=self.obj)
class ClientNetworkSourceAddressBindingTest(unittest.TestCase):
    """Tests that if ClientNetwork has a source IP set manually, the underlying library has
    used the provided source address."""
    def setUp(self):
        # Any syntactically valid IP works here; no traffic is actually sent.
        self.source_address = "8.8.8.8"
    def test_source_address_set(self):
        """Every mounted adapter must carry the configured source address."""
        from acme.client import ClientNetwork
        net = ClientNetwork(key=None, alg=None, source_address=self.source_address)
        for adapter in net.session.adapters.values():
            self.assertTrue(self.source_address in adapter.source_address)
    def test_behavior_assumption(self):
        """This is a test that guardrails the HTTPAdapter behavior so that if the default for
        a Session() changes, the assumptions here aren't violated silently."""
        from acme.client import ClientNetwork
        # Source address not specified, so the default adapter type should be bound -- this
        # test should fail if the default adapter type is changed by requests
        net = ClientNetwork(key=None, alg=None)
        session = requests.Session()
        for scheme in session.adapters.keys():
            client_network_adapter = net.session.adapters.get(scheme)
            default_adapter = session.adapters.get(scheme)
            self.assertEqual(client_network_adapter.__class__, default_adapter.__class__)
# Allow running this test module directly; test runners discover it as usual.
if __name__ == '__main__':
    unittest.main() # pragma: no cover
| 43.692695
| 98
| 0.663035
|
acfd7ed5969f6a252f4d2bd7db089adec5b09081
| 199
|
py
|
Python
|
geo_search.py
|
mfranklin128/places-exploration
|
bedf49db0c08c98ba606b8a09ce0d25b38ed103b
|
[
"Apache-2.0"
] | null | null | null |
geo_search.py
|
mfranklin128/places-exploration
|
bedf49db0c08c98ba606b8a09ce0d25b38ed103b
|
[
"Apache-2.0"
] | null | null | null |
geo_search.py
|
mfranklin128/places-exploration
|
bedf49db0c08c98ba606b8a09ce0d25b38ed103b
|
[
"Apache-2.0"
] | null | null | null |
# Print a lat/long sample grid (50 x 10 points) starting near lower Manhattan.
increment = 0.0012       # grid spacing in degrees (~130 m of latitude)
starting_lat = 40.7066128
starting_long = -74.0129726
for i in range(50):
    for j in range(10):
        # print() call form works on both Python 2 and Python 3
        # (the original used the Python-2-only `print str(...)` statement).
        print(str(starting_lat + increment * i) + " " + str(starting_long + increment * j))
| 28.428571
| 84
| 0.698492
|
acfd7f787f2aa2aba9e07c30956c3f73be440645
| 1,680
|
py
|
Python
|
sphinx-sources/Examples/AiryBeam/AiryBeam2D_selfhealing_II.py
|
opticspy/lightpipes-python
|
dbd66e46ca8263a6e9bf7690e5f2b2551f93f4cb
|
[
"BSD-3-Clause"
] | null | null | null |
sphinx-sources/Examples/AiryBeam/AiryBeam2D_selfhealing_II.py
|
opticspy/lightpipes-python
|
dbd66e46ca8263a6e9bf7690e5f2b2551f93f4cb
|
[
"BSD-3-Clause"
] | null | null | null |
sphinx-sources/Examples/AiryBeam/AiryBeam2D_selfhealing_II.py
|
opticspy/lightpipes-python
|
dbd66e46ca8263a6e9bf7690e5f2b2551f93f4cb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Self healing Airy beam
"""
from LightPipes import *
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
# Simulation parameters
wavelength = 2.3*um
size = 30*mm            # grid side length
N = 500                 # grid points per side
x0 = y0 = 1*mm          # Airy beam transverse scales
a1 = a2 = 0.1/mm        # Airy beam apodization coefficients
w = 1*mm                # radius of the circular obstruction
z = 0*cm                # propagation distance, advanced every frame
dz = 2*cm               # propagation step per frame

fig, ax = plt.subplots()
ax.axis('off')
ims = []

# Initial field: a 2D Airy beam.
F0 = Begin(size, wavelength, N)
F0 = AiryBeam2D(F0, x0=x0, y0=y0, a1=a1, a2=a2)
I0 = Intensity(F0)  # intensity at z=0, kept for optional static plotting

for i in range(1000):
    z += dz
    if i == 20:
        # Insert the obstruction once; the Airy beam "self-heals" downstream.
        # (Fixed: the original had a redundant double assignment F0=F0=...)
        F0 = CircScreen(F0, w)
    F = Fresnel(F0, z)
    I = Intensity(F)
    im = ax.imshow(I, animated=False)
    ims.append([im])

ani = animation.ArtistAnimation(fig, ims, interval=5, blit=True,
                                repeat_delay=1000)
ani.save("movie.mp4")
plt.show()
| 26.666667
| 95
| 0.526786
|
acfd80516befdaf2bff603286a97f624bfc4db28
| 11,341
|
py
|
Python
|
vendor/packages/translate-toolkit/translate/convert/pot2po.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | 2
|
2019-08-19T17:08:47.000Z
|
2019-10-05T11:37:02.000Z
|
vendor/packages/translate-toolkit/translate/convert/pot2po.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
vendor/packages/translate-toolkit/translate/convert/pot2po.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2009 Zuza Software Foundation
#
# This file is part of translate.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert template files (like .pot or template .xlf files) translation files,
preserving existing translations.
See: http://translate.sourceforge.net/wiki/toolkit/pot2po for examples and
usage instructions.
"""
from translate.storage import factory
from translate.search import match
from translate.misc.multistring import multistring
from translate.tools import pretranslate
from translate.storage import poheader
def convertpot(input_file, output_file, template_file, tm=None, min_similarity=75, fuzzymatching=True, classes=factory.classes, **kwargs):
    """Parse the input and optional template, merge them, and write the result.

    Thin file-level wrapper around convert_stores(); returns 1 on success
    (the convention expected by the convert framework).
    """
    source_store = factory.getobject(input_file, classes=classes)
    if template_file is None:
        old_store = None
    else:
        old_store = factory.getobject(template_file, classes=classes)
    merged = convert_stores(source_store, old_store, tm, min_similarity, fuzzymatching, **kwargs)
    output_file.write(str(merged))
    return 1
def convert_stores(input_store, template_store, tm=None, min_similarity=75, fuzzymatching=True, **kwargs):
    """Actual conversion function, works on stores not files, returns
    a properly initialized pretranslated output store, with structure
    based on input_store, metadata based on template_store, migrates
    old translations from template_store and pretranslating from tm

    :param tm: optional translation-memory file for fuzzy matching
    :param min_similarity: minimum fuzzy-match score (percent) for reuse
    :param fuzzymatching: when False, skip building any fuzzy matchers
    """
    #prepare for merging
    output_store = type(input_store)()
    #create fuzzy matchers to be used by pretranslate.pretranslate_unit
    matchers = []
    _prepare_merge(input_store, output_store, template_store)
    if fuzzymatching:
        if template_store:
            # max_length caps candidate string length for performance
            matcher = match.matcher(template_store, max_candidates=1, min_similarity=min_similarity, max_length=3000, usefuzzy=True)
            matcher.addpercentage = False
            matchers.append(matcher)
        if tm:
            matcher = pretranslate.memory(tm, max_candidates=1, min_similarity=min_similarity, max_length=1000)
            matcher.addpercentage = False
            matchers.append(matcher)
    #initialize store
    _store_pre_merge(input_store, output_store, template_store)
    # Do matching
    for input_unit in input_store.units:
        if input_unit.istranslatable():
            input_unit = pretranslate.pretranslate_unit(input_unit, template_store, matchers, mark_reused=True)
            _unit_post_merge(input_unit, input_store, output_store, template_store)
            output_store.addunit(input_unit)
    #finalize store
    _store_post_merge(input_store, output_store, template_store)
    return output_store
##dispatchers
def _prepare_merge(input_store, output_store, template_store, **kwargs):
    """Prepare stores & TM matchers before merging.

    Dispatches to an optional format-specific hook named
    "_prepare_merge_<StoreClass>" and then builds indexes so units can be
    looked up by source string and location during the merge.
    """
    #dispatch to format specific functions
    prepare_merge_hook = "_prepare_merge_%s" % input_store.__class__.__name__
    # dict.has_key() is deprecated (removed in Python 3); use `in` instead.
    if prepare_merge_hook in globals():
        globals()[prepare_merge_hook](input_store, output_store, template_store, **kwargs)
    #generate an index so we can search by source string and location later on
    input_store.makeindex()
    if template_store:
        template_store.makeindex()
def _store_pre_merge(input_store, output_store, template_store, **kwargs):
    """Initialize the new file with things like headers and metadata.

    Stores implementing the poheader interface get merged PO headers first;
    afterwards an optional "_store_pre_merge_<StoreClass>" hook runs.
    """
    #formats that implement poheader interface are a special case
    if isinstance(input_store, poheader.poheader):
        _do_poheaders(input_store, output_store, template_store)
    #dispatch to format specific functions
    store_pre_merge_hook = "_store_pre_merge_%s" % input_store.__class__.__name__
    # dict.has_key() is deprecated (removed in Python 3); use `in` instead.
    if store_pre_merge_hook in globals():
        globals()[store_pre_merge_hook](input_store, output_store, template_store, **kwargs)
def _store_post_merge(input_store, output_store, template_store, **kwargs):
    """Close file after merging all translations, used for adding
    statistics, obsolete messages and similar wrapup tasks.

    Dispatches to an optional "_store_post_merge_<StoreClass>" hook.
    """
    #dispatch to format specific functions
    store_post_merge_hook = "_store_post_merge_%s" % input_store.__class__.__name__
    # dict.has_key() is deprecated (removed in Python 3); use `in` instead.
    if store_post_merge_hook in globals():
        globals()[store_post_merge_hook](input_store, output_store, template_store, **kwargs)
def _unit_post_merge(input_unit, input_store, output_store, template_store, **kwargs):
    """Handle any unit level cleanup and situations not handled by the merge()
    function.

    Dispatches to an optional "_unit_post_merge_<UnitClass>" hook.
    """
    #dispatch to format specific functions
    unit_post_merge_hook = "_unit_post_merge_%s" % input_unit.__class__.__name__
    # dict.has_key() is deprecated (removed in Python 3); use `in` instead.
    if unit_post_merge_hook in globals():
        globals()[unit_post_merge_hook](input_unit, input_store, output_store, template_store, **kwargs)
##format specific functions
def _prepare_merge_pofile(input_store, output_store, template_store):
    """PO format specific template preparation logic."""
    # Obsolete units still carry translations worth fuzzy-matching against,
    # so bring them back to life before the template is indexed.
    if template_store:
        for template_unit in template_store.units:
            if template_unit.isobsolete():
                template_unit.resurrect()
def _unit_post_merge_pounit(input_unit, input_store, output_store, template_store):
    """PO format specific plural string initialization logic."""
    #FIXME: do we want to do that for poxliff also?
    if input_unit.hasplural() and len(input_unit.target) == 0:
        # untranslated plural unit; Let's ensure that we have the correct number of plural forms:
        # (nplurals == '2' is the default shape, so nothing to adjust then;
        # the `plural` expression is unused here)
        nplurals, plural = output_store.getheaderplural()
        if nplurals and nplurals.isdigit() and nplurals != '2':
            input_unit.target = multistring([""]*int(nplurals))
def _store_post_merge_pofile(input_store, output_store, template_store):
    """PO format specific: adds newly obsoleted messages to end of store."""
    #Let's take care of obsoleted messages
    if template_store:
        newlyobsoleted = []
        for unit in template_store.units:
            if unit.isheader():
                continue
            if unit.target and not (input_store.findunit(unit.source) or hasattr(unit, "reused")):
                #not in .pot, make it obsolete
                unit.makeobsolete()
                newlyobsoleted.append(unit)
            elif unit.isobsolete():
                # already obsolete in the template: keep it in the output
                output_store.addunit(unit)
        # freshly obsoleted units are appended after pre-existing ones
        for unit in newlyobsoleted:
            output_store.addunit(unit)
def _do_poheaders(input_store, output_store, template_store):
    """Adds initialized PO headers to output store.

    Translation-describing fields (translator, team, revision date, plural
    forms, content type) are carried over from the template; source-side
    fields (POT creation date, MIME version) come from the input POT.
    Header comments and fuzziness come from the input and may be
    overridden by the template.
    """
    # header values
    charset = "UTF-8"
    encoding = "8bit"
    project_id_version = None
    pot_creation_date = None
    po_revision_date = None
    last_translator = None
    language_team = None
    mime_version = None
    plural_forms = None
    kwargs = {}
    if template_store is not None and isinstance(template_store, poheader.poheader):
        templateheadervalues = template_store.parseheader()
        # dict.iteritems() is Python-2-only; items() iterates identically here.
        for key, value in templateheadervalues.items():
            if key == "Project-Id-Version":
                project_id_version = value
            elif key == "Last-Translator":
                last_translator = value
            elif key == "Language-Team":
                language_team = value
            elif key == "PO-Revision-Date":
                po_revision_date = value
            elif key in ("POT-Creation-Date", "MIME-Version"):
                # don't know how to handle these keys, or ignoring them
                pass
            elif key == "Content-Type":
                kwargs[key] = value
            elif key == "Content-Transfer-Encoding":
                encoding = value
            elif key == "Plural-Forms":
                plural_forms = value
            else:
                kwargs[key] = value
    inputheadervalues = input_store.parseheader()
    for key, value in inputheadervalues.items():
        if key in ("Project-Id-Version", "Last-Translator", "Language-Team", "PO-Revision-Date", "Content-Type", "Content-Transfer-Encoding", "Plural-Forms"):
            # want to carry these from the template so we ignore them
            pass
        elif key == "POT-Creation-Date":
            pot_creation_date = value
        elif key == "MIME-Version":
            mime_version = value
        else:
            kwargs[key] = value
    output_header = output_store.init_headers(charset=charset, encoding=encoding, project_id_version=project_id_version,
                                              pot_creation_date=pot_creation_date, po_revision_date=po_revision_date, last_translator=last_translator,
                                              language_team=language_team, mime_version=mime_version, plural_forms=plural_forms, **kwargs)
    # Get the header comments and fuzziness state
    # initial values from pot file
    input_header = input_store.header()
    if input_header is not None:
        if input_header.getnotes("developer"):
            output_header.addnote(input_header.getnotes("developer"), origin="developer", position="replace")
        if input_header.getnotes("translator"):
            output_header.addnote(input_header.getnotes("translator"), origin="translator", position="replace")
        output_header.markfuzzy(input_header.isfuzzy())
    # override some values from input file
    if template_store is not None:
        template_header = template_store.header()
        if template_header is not None:
            if template_header.getnotes("translator"):
                output_header.addnote(template_header.getnotes("translator"), "translator")
            output_header.markfuzzy(template_header.isfuzzy())
def main(argv=None):
    """Command-line entry point for pot2po; see the module docstring for usage."""
    from translate.convert import convert
    # input extension (or (template, input) pair) -> (output extension, converter)
    formats = {"pot": ("po", convertpot), ("pot", "po"): ("po", convertpot),
               "xlf": ("xlf", convertpot), ("xlf", "xlf"): ("xlf", convertpot),
               }
    parser = convert.ConvertOptionParser(formats, usepots=True, usetemplates=True,
                                         allowmissingtemplate=True, description=__doc__)
    parser.add_option("", "--tm", dest="tm", default=None,
                      help="The file to use as translation memory when fuzzy matching")
    parser.passthrough.append("tm")
    defaultsimilarity = 75
    parser.add_option("-s", "--similarity", dest="min_similarity", default=defaultsimilarity,
                      type="float", help="The minimum similarity for inclusion (default: %d%%)" % defaultsimilarity)
    parser.passthrough.append("min_similarity")
    parser.add_option("--nofuzzymatching", dest="fuzzymatching", action="store_false",
                      default=True, help="Disable fuzzy matching")
    parser.passthrough.append("fuzzymatching")
    parser.run(argv)
# Command-line entry point.
if __name__ == '__main__':
    main()
| 43.619231
| 158
| 0.701878
|
acfd80541410aac72caa08cad19716094edb8e82
| 1,433
|
py
|
Python
|
src/NN1.py
|
patelotech/genetic_deep_learning
|
04d418a35a5adcb2f201dba657b6248263596eed
|
[
"MIT"
] | 5
|
2019-01-15T13:56:20.000Z
|
2019-11-15T12:57:51.000Z
|
src/NN1.py
|
IAARhub/genetic_deep_learning
|
bd45298d330c117f895d6873c8e36b8f40d0dc10
|
[
"MIT"
] | null | null | null |
src/NN1.py
|
IAARhub/genetic_deep_learning
|
bd45298d330c117f895d6873c8e36b8f40d0dc10
|
[
"MIT"
] | 1
|
2019-01-14T21:18:02.000Z
|
2019-01-14T21:18:02.000Z
|
import numpy as np
from nn_utils import sigmoid
# Gradient-descent step size used by NN1's weight update.
learning_rate = 0.001
class NN1:
    """Single-layer sigmoid network trained by plain gradient descent.

    Training happens entirely in the constructor: `epochs` passes over
    (train_x, train_y); every `print_step` epochs (and on the final one)
    the mean training error and held-out accuracy are printed as CSV.
    """

    def __init__(
            self,
            train_x,
            train_y,
            test_x,
            test_y,
            epochs,
            w=None,
            print_step=None):
        self.l1_error = 0
        if w is None:
            # One weight per input feature, uniform in [-1, 1).
            # Integer division (//) keeps the shape an int on Python 3 too;
            # the original used `/` (float there) and py2-only __len__() call.
            n_features = train_x.size // len(train_x)
            self.w0 = 2 * np.random.random((n_features, 1)) - 1
        else:
            self.w0 = w
        for j in range(1, epochs + 1):  # range, not Python-2-only xrange
            l1 = sigmoid(np.dot(train_x, self.w0))
            self.l1_error = train_y - l1
            if (print_step is not None) and (
                    (j % print_step == 0) or j == epochs):
                accuracy = self.calc_accuracy(test_x, test_y)
                print(
                    "{},{},{}".format(
                        j,
                        np.mean(np.abs(self.l1_error)),
                        accuracy))
            # Delta rule: error scaled by the sigmoid derivative.
            adjustment = self.l1_error * sigmoid(l1, deriv=True)
            self.w0 += train_x.T.dot(adjustment) * learning_rate

    def get_weight(self):
        """Return the current weight column vector."""
        return self.w0

    def get_error(self):
        """Mean absolute training error of the last epoch."""
        return np.mean(np.abs(self.l1_error))

    def calc_accuracy(self, test_x, test_y):
        """Return 1 - mean absolute error on the held-out set."""
        prime_y = sigmoid(np.dot(test_x, self.w0))
        y_error = test_y - prime_y
        return 1 - np.mean(np.abs(y_error))
| 28.098039
| 75
| 0.465457
|
acfd80aa69dfb07945b994b3750d49cb0ead00a6
| 18,469
|
py
|
Python
|
far_ws/src/follow_ahead_rl/scripts/MCTS.py
|
alik604/ra
|
6058a9adb47db93bb86bcb2c224930c5731d663d
|
[
"Unlicense"
] | null | null | null |
far_ws/src/follow_ahead_rl/scripts/MCTS.py
|
alik604/ra
|
6058a9adb47db93bb86bcb2c224930c5731d663d
|
[
"Unlicense"
] | 5
|
2021-03-26T01:30:13.000Z
|
2021-04-22T22:19:03.000Z
|
far_ws/src/follow_ahead_rl/scripts/MCTS.py
|
alik604/ra
|
6058a9adb47db93bb86bcb2c224930c5731d663d
|
[
"Unlicense"
] | 1
|
2021-05-05T00:57:43.000Z
|
2021-05-05T00:57:43.000Z
|
import pickle
import os
import random
import math
import gym
import gym_gazeboros_ac
from time import sleep
from collections import deque
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from DDQN_Discrete import DeepQNetwork, Agent
# Print full numpy arrays on a single line when debugging.
np.set_printoptions(linewidth=np.inf)
ENV_NAME = 'gazeborosAC-v0'
# Pickled polynomial regressor used to predict the person's next pose.
PATH_POLY = './model_weights/HumanIntentNetwork/PolynomialRegressor'
WINDOW_SIZE = 10 # pose-history length kept per agent; perhaps this should be as large as possible.
WINDOW_SIZE_predict_person = 4-1 # must match the window the PolynomialRegressor was trained on, minus 1
# Usage sketch for the pose-history deques:
# person_history = deque([0]*window_size, maxlen=window_size)
# person_history.appendleft([xyTheta])
# list(person_history)
# Load the trained regressor that predict_person uses; fail fast if missing.
if os.path.isfile(PATH_POLY):
    # Context manager closes the file handle (the original leaked it).
    with open(PATH_POLY, 'rb') as regr_file:
        REGR = pickle.load(regr_file)
else:
    # Descriptive error instead of the original bare `raise Exception`.
    raise FileNotFoundError(f"PolynomialRegressor save not found at {PATH_POLY}")
def predict_person(state_history):
    """Predict the person's next [x, y, theta] from their recent pose history.

    Takes the newest WINDOW_SIZE_predict_person poses, expands them to
    degree-2 polynomial features and runs them through the pickled REGR.
    """
    window = np.array(state_history[:WINDOW_SIZE_predict_person]).reshape(1, -1)
    # Fitting PolynomialFeatures is stateless here; fit_transform just expands.
    features = PolynomialFeatures(degree=2).fit_transform(window)
    prediction = REGR.predict(features)
    return prediction.flatten().tolist()
# Transform a pose (position + orientation) through the frame defined by
# pos_center / orientation_center, returning (x, y, theta) in that frame.
def get_relative_pose(pos_goal, orientation_goal, pos_center, orientation_center):
    center_pos = np.asarray(pos_center)
    theta_c = orientation_center
    # Represent the goal pose as two points: the position itself and a point
    # one unit ahead along the goal orientation (used to recover the angle).
    p0 = np.asarray(pos_goal)
    p1 = np.asarray([p0[0] + math.cos(orientation_goal),
                     p0[1] + math.sin(orientation_goal)]).T
    # NOTE(review): the sign convention of this rotation differs from other
    # rotation matrices in the project (original TODO) — confirm with viz.
    rot = np.array([[np.cos(theta_c), np.sin(theta_c)],
                    [-np.sin(theta_c), np.cos(theta_c)]])
    g0 = np.matmul(p0, rot) + center_pos
    g1 = np.matmul(p1, rot) + center_pos
    # Recover the transformed orientation from the two transformed points.
    heading = np.arctan2(g1[1] - g0[1], g1[0] - g0[0])
    return g0[0], g0[1], heading
def MCTS(trajectories, person_history_actual, robot_history_actual, Nodes_to_explore):
    """One-step Monte-Carlo tree search over precomputed robot trajectories.

    Args:
        trajectories (np.array): precomputed candidate trajectories; index i
            corresponds to discrete action i of the DDQN agent.
        person_history_actual (deque): observed person poses [x, y, theta],
            newest first; mutated (appendleft) by this call.
        robot_history_actual (deque): observed robot poses [x, y, theta],
            newest first; mutated (appendleft) by this call.
        Nodes_to_explore (int): top-N Q-value actions to expand.

    Returns:
        int: index into `trajectories` of the recommended move.

    Notes:
        Relies on module-level `env` and `agent` — NOTE(review): assumed to
        be initialized elsewhere before this is called; confirm.
        Each branch reward is 1.01 * (Q + immediate reward), then refined by
        MCTS_recursive; the 1.01 factor weights deeper rewards slightly
        higher (see original design notes / TODOs in MCTS_recursive).
        # TODO visualize the path of the robot and the human and reward
    """
    # print(f'\n\n[MCTS]')
    # print(f'trajectories: {trajectories}')
    # print(f'len(trajectories): {len(trajectories)}')
    # predict person's next move (relative to robot's current pose)
    person_pos = env.person.state_["position"]
    person_theta = env.person.state_["orientation"]
    x, y, theta = get_relative_pose(person_pos, person_theta, env.robot.state_["position"], env.robot.state_["orientation"])
    person_history_actual.appendleft([x, y, theta]) # no loop needed, this function is the "loop"
    person_past_state = list(person_history_actual) # TODO make note in docs about ordering (newest first)
    person_next_state = predict_person(person_past_state)
    # output of predict_person should be relative to robot...
    # x, y, theta = get_relative_pose([person_next_state[0], person_next_state[1]], person_next_state[2], env.robot.state_["position"], env.robot.state_["orientation"])
    person_history_predicted = person_history_actual.copy() # deque(person_past_state, maxlen=WINDOW_SIZE)
    person_history_predicted.appendleft(person_next_state)
    # print(f'person_next_state = {person_next_state}') # [xy[0], xy[1], state[2]]
    # predict robot's next moves
    robot_pos = env.robot.state_["position"]
    robot_theta = env.robot.state_["orientation"]
    robot_history_actual.appendleft([robot_pos[0], robot_pos[1], robot_theta])
    state = env.get_observation_relative_robot() # this could come from main, but perhaps it is best to re-query
    QValues = agent.action_probs(state) # there is no noise... exploration vs exploitation
    idices = np.argsort(QValues)[::-1] # flip to get largest to smallest
    idices = idices[:Nodes_to_explore] # select top N
    # print(f'QValues:\n{QValues} | sum {np.sum(QValues):.2f}')
    # print(f'idices to explore {idices}')
    # Recursively search to choose which of moves to recommend
    rewards = []
    for idx in idices:
        path_to_simulate = trajectories[idx]
        # print(f'\n\n\n[call MCTS_recursive from MCTS] path_to_simulate x: {path_to_simulate[0]} | y: {path_to_simulate[1]}')
        # print(f'trajectories are\n{trajectories}\n\n')
        reward = 1.01 * (QValues[idx] + env.get_reward(simulate=False))
        reward = MCTS_recursive(trajectories.copy(), robot_history_actual.copy(),
                                person_history_predicted.copy(), Nodes_to_explore-1, reward, idx)
        rewards.append(reward)
    best_idx = np.argmax(rewards)
    recommended_move = idices[best_idx]
    return recommended_move
def MCTS_recursive(trajectories, robot_history_predicted, person_history_predicted, Nodes_to_explore, past_rewards, exploring_idx, dTime=0.5):
""" MCTS_recursive
Args:
trajectories (np.array): precomputeed list of moves
path_to_simulate (np.array): path to take (simulated) to get to the start point
path_to_simulate[0] is x
path_to_simulate[0] is y
robot_history_predicted (deque): stored as [x, y, theta]
person_history_predicted (deque): [x, y, theta]. this is from `hinn_data_collector.py` which maintians a history for [xy[0], xy[1], state[2]]
Nodes_to_explore (int): top N actions to consider
past_rewards: past rewards
exploring_idx (int): debug index of which precomputer traj are we branching from
dTime (float): dTime in velocity calculations. it is 0.5 because that is what is used to sleep in `hinn_data_collector.py`
Returns:
int: recommended_move, which of N actions to take; which of the `trajectories` to take
"""
# TODO add.... array, orientation = self.get_global_position_orientation([x, y], orientation, self.robot)
# print(f'[start MCTS_recursive] exploring idx: {exploring_idx}') # \ntrajectories are\n{trajectories}\n\n')
QValues = []
states_to_simulate_robot = []
states_to_simulate_person = []
robot_pos = robot_history_predicted[0].copy()
path_to_simulate = trajectories[exploring_idx].copy()
# path_to_simulate = np.around(path_to_simulate, 2)
# print(f'[before] path_to_simulate x: {path_to_simulate[0]} | y: {path_to_simulate[1]}')
# // offset path_to_simulate with current robot pos
for idx in range(len(path_to_simulate[0])): # TODO this is wrong
path_to_simulate[0][idx] += robot_pos[0]
path_to_simulate[1][idx] += robot_pos[1]
path_to_simulate = np.around(path_to_simulate, 2)
# print(f'[after] path_to_simulate x: {path_to_simulate[0]} | y: {path_to_simulate[1]} | has been adjust with x {robot_pos[0]} and y {robot_pos[1]}')
# // [robot] account for history. since env outputs states based on window of last 10. We also ensure pose is relative to robot.
robot_hist = list(robot_history_predicted)
robot_hist.reverse()
for idx in range(len(robot_hist)-1):
last_x, last_y, last_theta = robot_hist[idx][0], robot_hist[idx][1], robot_hist[idx][2]
x, y, theta = robot_hist[idx+1][0], robot_hist[idx+1][1], robot_hist[idx+1][2]
x, y, theta = get_relative_pose([x,y], theta, [last_x, last_y], last_theta)
robot_state = {}
angular_velocity = (theta-last_theta)/dTime
linear_velocity = np.hypot(x-last_x, y-last_y)/dTime # TODO delta time here is worng, need a elegant way to have it. it's dTime or dTime/NUMBER_SUB_STEPS, depending if MCTS or MCTS_recursive called MCTS_recursive (as `robot_history_predicted` might be "robot_history_predicted" or "robot_history_actual" )
robot_state["velocity"] = (linear_velocity, angular_velocity)
robot_state["position"] = (x, y)
robot_state["orientation"] = theta
states_to_simulate_robot.append(robot_state)
# TODO why not just the last. that is where we step to...
# // [robot] account for `path_to_simulate`, our chosen trajectory.
NUMBER_SUB_STEPS = len(path_to_simulate[0])
time_step = dTime/NUMBER_SUB_STEPS
for idx in range(NUMBER_SUB_STEPS):
robot_state = {}
x, y, theta = path_to_simulate[0][idx], path_to_simulate[1][idx], path_to_simulate[2][idx]
last_x, last_y, last_theta = robot_history_predicted[0][0], robot_history_predicted[0][1], robot_history_predicted[0][2]
angular_velocity = (theta-last_theta)/time_step # fist elem is latest. angular_velocity is dTheta/dTime
linear_velocity = np.hypot(x-last_x, y-last_y)/time_step
x, y, theta = get_relative_pose([x,y], theta, [last_x, last_y], last_theta)
robot_state["velocity"] = (linear_velocity, angular_velocity)
robot_state["position"] = (x, y)
robot_state["orientation"] = theta
states_to_simulate_robot.append(robot_state)
robot_history_predicted.appendleft([x, y, theta])
# print(f'robot_state["position"] {robot_state["position"]}')
# // [person] predict person's next move. and account for history # add newest to front. flip and build `states_to_simulate_person`. ref for math https://courses.lumenlearning.com/boundless-physics/chapter/quantities-of-rotational-kinematics/
person_next_state = predict_person(list(person_history_predicted))
person_history_predicted.appendleft(person_next_state)
state_ = states_to_simulate_robot[-1]
pos_state_ = state_['position']
theta_state_ = state_['orientation']
person_hist = list(person_history_predicted)
person_hist.reverse() # oldest to latest
for idx in range(len(person_hist)-1):
last_x, last_y, last_theta = person_hist[idx][0], person_hist[idx][1], person_hist[idx][2]
last_x, last_y, last_theta = get_relative_pose([last_x, last_y], last_theta, pos_state_, theta_state_)
x, y, theta = person_hist[idx+1][0], person_hist[idx+1][1], person_hist[idx+1][2]
x, y, theta = get_relative_pose([x,y], theta, pos_state_, theta_state_)
person_state = {}
angular_velocity = (theta-last_theta)/dTime
linear_velocity = np.hypot(x-last_x, y-last_y)/dTime
person_state["velocity"] = (linear_velocity, angular_velocity)
person_state["position"] = (x, y)
person_state["orientation"] = theta
states_to_simulate_person.append(person_state)
# print(f'predicted next state of person = {person_state}') # [xy[0], xy[1], state[2]]
# predict robot's next best moves & select top N
state = env.get_observation_relative_robot(states_to_simulate_robot, states_to_simulate_person)
QValues = agent.action_probs(state) # there is no noise... exploration vs exploitation
idices = np.argsort(QValues)[::-1] # flip to get largest to smallest
idices = idices[:Nodes_to_explore] # select top N
# print(f'QValues:\n{QValues} | sum {np.sum(QValues):.2f}')
# print(f'idices to explore {idices}')
if Nodes_to_explore == 1:
# print(f'[tail] path_to_simulate: {path_to_simulate}')
return 0.975*(QValues[idices[0]]*env.get_reward(simulate=True)) + past_rewards
else:
# Recursively search
rewards = []
# print(f'robot_pos was {robot_pos}')
# print(f'robot_pos is now {robot_history_predicted[0]}')
for idx in idices:
# print(f'\n\n\n[call MCTS_recursive from MCTS_recursive] path_to_simulate x: {path_to_simulate[0]} | y: {path_to_simulate[1]}')
# we need both scalers
current_reward = (0.98*QValues[idx]*env.get_reward(simulate=True)) + (0.99 * past_rewards)
# print(f'[before recursivly calling MCTS_recursive]\ntrajectories are\n{trajectories}\n\n')
reward = MCTS_recursive(trajectories.copy(), robot_history_predicted.copy(),
person_history_predicted.copy(), Nodes_to_explore-1, current_reward, exploring_idx=idx)
rewards.append(reward)
best_idx = np.argmax(rewards)
recommended_move = idices[best_idx]
# print(f'[MCTS_recursive] recommended_move is {recommended_move}')
return recommended_move
if __name__ == '__main__':
    # Load the precomputed discrete action space: one [x, y, theta] path per action.
    trajectories = []
    with open('discrete_action_space.pickle', 'rb') as handle:
        x = pickle.load(handle)
    x, y, theta = list(zip(*x))
    for i in range(len(x)):
        trajectories.extend([[x[i], y[i], theta[i]]])
    print(f'trajectories: {trajectories}')
    # Rolling pose histories ([x, y, theta]) for person and robot; newest first.
    person_history_actual = deque([0]*WINDOW_SIZE, maxlen=WINDOW_SIZE)
    robot_history_actual = deque([0]*WINDOW_SIZE, maxlen=WINDOW_SIZE)
    n_actions = len(trajectories)
    observation_shape = 47
    # Note: epsilon-greedy is used in `choose_action` but not in `action_probs`,
    # so the exploration params don't matter here.
    agent = Agent(gamma=0.99, batch_size=512, n_actions=n_actions, input_dims=[observation_shape],
                  lr=0.01, ALIs_over_training=2, file_label = "DDQN_MCTS")
    agent.load_models()
    print('START Test')
    N_GAMES = 100000
    MODES = [0,1,2]
    rewards = []
    best_score = -100  # retained for the (disabled) best-score checkpointing below
    env = gym.make(ENV_NAME).unwrapped
    env.set_agent(0)
    # linear_velocity, angular_velocity. from 0 to 1, a % of the max_linear_vel (0.8) & max_angular_vel (1.8)
    for game in range(N_GAMES):
        state_rel_person = env.reset()
        score = 0
        done = False
        # Seed both histories with the current poses so the first window is full.
        person_pos = env.person.state_["position"]
        person_theta = env.person.state_["orientation"]
        for _ in range(WINDOW_SIZE):
            person_history_actual.appendleft([person_pos[0], person_pos[1], person_theta])
        robot_pos = env.robot.state_["position"]
        robot_theta = env.robot.state_["orientation"]
        for _ in range(WINDOW_SIZE):
            robot_history_actual.appendleft([robot_pos[0], robot_pos[1], robot_theta])
        mode = 1 # random.choice(MODES)
        print(f"Running game: {game} of {N_GAMES} | Person Mode {mode}")
        env.set_person_mode(mode) # mode
        observation = env.get_observation_relative_robot()
        while not done:
            recommended_move = MCTS(trajectories.copy(), person_history_actual, robot_history_actual, Nodes_to_explore=3)
            # Step toward the endpoint of the chosen precomputed trajectory,
            # expressed relative to the robot's current pose.
            # NOTE(review): path_to_simulate[-1] indexes the theta row of
            # [x, y, theta]; verify the intended endpoint isn't
            # path_to_simulate[0][-1], path_to_simulate[1][-1].
            path_to_simulate = trajectories[recommended_move].copy()
            current_robot_pos = env.robot.state_['position']
            x, y = path_to_simulate[-1][0] + current_robot_pos[0], path_to_simulate[-1][1] + current_robot_pos[1] # offset with current_robot_pos
            x, y, theta = get_relative_pose([x, y], env.robot.state_['orientation'], [current_robot_pos[0], current_robot_pos[1]], env.robot.state_['orientation'])
            state_rel_person, reward, done, _ = env.step([x, y])
            observation_ = env.get_observation_relative_robot()
            agent.store_transition(observation, recommended_move, reward, observation_, done)
            agent.learn()
            observation = observation_
            score += reward
        # BUG FIX: previously tested `i % 10` where `i` is the stale loop
        # variable left over from trajectory loading (constant across games),
        # so checkpointing fired either every game or never. Save every 10th game.
        if game % 10 == 0:
            agent.save_models()
        rewards.append(score)
    agent.save_models()
    print("DONE")
    env.close()
    print(f'rewards: \n{rewards}')
    plt.plot(rewards)  # NOTE(review): figure is never shown/saved — exit(0) follows without plt.show()
    exit(0)
| 47.723514
| 314
| 0.672532
|
acfd81f55399c91cca1e9a8563443c9b9657c595
| 389
|
py
|
Python
|
Geekie/Geekie/wsgi.py
|
1ricardo66/Django2.0
|
e29afefc908723525d60ea40da353a7ab5b27d1c
|
[
"Apache-2.0"
] | 1
|
2018-08-21T12:26:02.000Z
|
2018-08-21T12:26:02.000Z
|
Geekie/Geekie/wsgi.py
|
1ricardo66/Django2.0
|
e29afefc908723525d60ea40da353a7ab5b27d1c
|
[
"Apache-2.0"
] | null | null | null |
Geekie/Geekie/wsgi.py
|
1ricardo66/Django2.0
|
e29afefc908723525d60ea40da353a7ab5b27d1c
|
[
"Apache-2.0"
] | null | null | null |
"""
WSGI config for Geekie project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Geekie.settings")
application = get_wsgi_application()
| 22.882353
| 78
| 0.784062
|
acfd8219fc20464b62ce88556f542fa6c1015ce8
| 14,974
|
py
|
Python
|
.history/src/Simulador_20200710164625.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
.history/src/Simulador_20200710164625.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
.history/src/Simulador_20200710164625.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.sparse import csr_matrix, lil_matrix
class Simulador():
    """Epidemic simulation on a spherical (wrap-around) n x n grid.

    Tracks healthy, type-1 infected (non-contagious), type-2 infected
    (contagious, may die), cured and dead individuals, recording the
    aggregate counts into ``self.dataframe`` and one sparse status matrix
    per update into ``self.lista_matrizes_posicionamento``.
    """

    def __init__(
        self,
        tamanho_matriz,            # number of rows and columns of the spherical matrix
        percentual_inicial_tipo1,  # initial share of the population infected as type 1
        percentual_inicial_tipo2,  # initial share of the population infected as type 2
        chance_infeccao,           # chance a type-2 infected has of infecting a healthy individual
        chance_infeccao_tipo2,     # chance a new infection becomes contagious (type 2)
        chance_morte,              # chance a type-2 individual dies at the end of an update
        atualizacoes_cura):        # number of updates needed to cure a type-1 or type-2 individual
        self.num_atualizacoes = 0
        self.lista_infectados_tipo_2 = []
        self.lista_infectados_tipo_1 = []
        self.num_curados = 0
        self.num_mortos = 0
        self.chance_infeccao = chance_infeccao
        self.chance_infeccao_tipo2 = chance_infeccao_tipo2
        self.chance_morte = chance_morte
        self.fabrica_individuo = Fabrica_individuo(atualizacoes_cura)
        # NOTE(review): debug scaffolding — this DataFrame of individuals is
        # seeded with one object and never read again; confirm it can go.
        self.df_individuos = pd.DataFrame(index= range(tamanho_matriz), columns=range(tamanho_matriz))
        self.df_individuos.iloc[1,1] = self.fabrica_individuo.criar_individuo(10,(0,0))
        print("deu certo")
        print(self.df_individuos)
        #self.df_individuos.iloc[0,0] = 0
        # one sparse status matrix per update (positioning history)
        self.lista_matrizes_posicionamento = []
        # sparse matrix holding each cell's health status
        self.matriz_status = lil_matrix((tamanho_matriz,tamanho_matriz), dtype=np.uint8)
        # sparse matrix holding each object's index within its respective list
        self.matriz_localizacao = lil_matrix((tamanho_matriz,tamanho_matriz), dtype=np.uint32)
        # object responsible for validating movement on the n x n grid
        self.matriz_esferica = Matriz_esferica(tamanho_matriz)
        self.populacao_inicial = int(tamanho_matriz**2)
        self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
        self.num_inicial_tipo1 = int(self.populacao_inicial * percentual_inicial_tipo1)
        self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
        self.popular(tamanho_matriz)
        # DataFrame that will hold the aggregate results of each update
        self.dataframe = pd.DataFrame(index = [0])
        self.salvar_posicionamento()

    def salvar_posicionamento(self):
        """Snapshot the current status matrix into the positioning history."""
        self.lista_matrizes_posicionamento.append(self.matriz_status)

    def verificar_infeccao(self, lista_infectantes):
        """Let each individual in *lista_infectantes* try to infect its healthy neighbours.

        Returns:
            (list, list): newly created type-1 and type-2 infected individuals.
        """
        lista_novos_infectados_tipo1 = []
        lista_novos_infectados_tipo2 = []
        for infectante in lista_infectantes:
            indice_x = infectante.posicao[0]
            indice_y = infectante.posicao[1]
            # neighbours of the current infectious individual (with wrap-around)
            lista_vizinhos = self.matriz_esferica.get_vizinhos(indice_x, indice_y)
            # for each healthy neighbour, roll the dice for a new infection
            for vizinho in lista_vizinhos:
                x = vizinho[0]
                y = vizinho[1]
                if self.matriz_status[x,y] == Individuo.SADIO:
                    # BUG FIX: previously read the module-level globals
                    # `chance_infeccao`/`chance_infeccao_tipo2` instead of the
                    # probabilities this instance was configured with.
                    novo_status = infectante.infectar(self.chance_infeccao, self.chance_infeccao_tipo2)
                    if novo_status == Individuo.INFECTADO_TIPO_1:
                        lista_novos_infectados_tipo1.append(self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(x,y)))
                        self.matriz_status[x,y] = Individuo.INFECTADO_TIPO_1
                        self.matriz_localizacao[x,y] = len(lista_novos_infectados_tipo1) - 1
                    if novo_status == Individuo.INFECTADO_TIPO_2:
                        lista_novos_infectados_tipo2.append(self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_2,(x,y)))
                        self.matriz_status[x,y] = Individuo.INFECTADO_TIPO_2
                        self.matriz_localizacao[x,y] = len(lista_novos_infectados_tipo2) - 1
        return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2

    def verificar_morte(self, lista_infectantes_tipo2):
        """Run the death/cure check on every type-2 infected individual.

        Removes the dead and cured from *lista_infectantes_tipo2* in place.

        Returns:
            (int, int): number of deaths and cures in this update.
        """
        num_mortos = 0
        num_curados = 0
        # BUG FIX: iterate over a snapshot — removing from the list while
        # iterating it silently skipped the element after each removal.
        for infectante in list(lista_infectantes_tipo2):
            novo_status = infectante.checagem_morte(self.chance_morte)
            if novo_status == Individuo.MORTO:
                num_mortos += 1
                self.matriz_status[infectante.posicao[0], infectante.posicao[1]] = Individuo.MORTO
                lista_infectantes_tipo2.remove(infectante)
            if novo_status == Individuo.CURADO:
                num_curados += 1
                self.matriz_status[infectante.posicao[0], infectante.posicao[1]] = Individuo.CURADO
                lista_infectantes_tipo2.remove(infectante)
        return num_mortos, num_curados

    def verificar_cura(self, lista_infectantes):
        """Run the cure check on every infected individual in *lista_infectantes*.

        Removes the cured from the list in place and returns their count.
        """
        num_curados = 0
        # BUG FIX: iterate over a snapshot (see verificar_morte).
        for infectante in list(lista_infectantes):
            novo_status = infectante.checagem_cura()
            if novo_status == Individuo.CURADO:
                num_curados += 1
                self.matriz_status[infectante.posicao[0], infectante.posicao[1]] = Individuo.CURADO
                # BUG FIX: was `lista_infectantes_tipo.remove(...)` — an
                # undefined name that raised NameError on the first cure.
                lista_infectantes.remove(infectante)
        return num_curados

    def iterar(self):
        """Advance the simulation by one update: infect, kill/cure, move, record."""
        # New infections produced by the current type-1 and type-2 infectious lists.
        lista_novos_infectados_tipo1_1, lista_novos_infectados_tipo2_1 = self.verificar_infeccao(self.lista_infectados_tipo_1)
        lista_novos_infectados_tipo1_2, lista_novos_infectados_tipo2_2 = self.verificar_infeccao(self.lista_infectados_tipo_2)
        # Death/cure check for the type-2 infected.
        num_mortos_atualizacao, num_curados_t2_atualizacao = self.verificar_morte(self.lista_infectados_tipo_2)
        # Drop this update's deaths from the type-2 list.
        self.lista_infectados_tipo_2 = [i for i in self.lista_infectados_tipo_2 if i.status != Individuo.MORTO]
        self.num_mortos += num_mortos_atualizacao
        # Cure check for the type-1 infected.
        num_curados_t1_atualizacao = self.verificar_cura(self.lista_infectados_tipo_1)
        # Drop the cured from both infected lists.
        self.lista_infectados_tipo_2 = [i for i in self.lista_infectados_tipo_2 if i.status != Individuo.CURADO]
        self.lista_infectados_tipo_1 = [i for i in self.lista_infectados_tipo_1 if i.status != Individuo.CURADO]
        self.num_curados = self.num_curados + num_curados_t1_atualizacao + num_curados_t2_atualizacao
        # Move every remaining infectious individual one cell.
        for infectante in self.lista_infectados_tipo_1:
            self.mover_infectante(infectante)
        for infectante in self.lista_infectados_tipo_2:
            self.mover_infectante(infectante)
        # Append the newly infected to their respective lists.
        self.lista_infectados_tipo_2 = self.lista_infectados_tipo_2 + lista_novos_infectados_tipo2_1 + lista_novos_infectados_tipo2_2
        self.lista_infectados_tipo_1 = self.lista_infectados_tipo_1 + lista_novos_infectados_tipo1_1 + lista_novos_infectados_tipo1_2
        num_tipo_1 = len(self.lista_infectados_tipo_1)
        num_tipo_2 = len(self.lista_infectados_tipo_2)
        registro = {
            'num_sadios':self.populacao_inicial - self.num_mortos - self.num_curados - num_tipo_1 - num_tipo_2 ,
            'num_infect_t1':num_tipo_1,
            'num_infect_t2':num_tipo_2,
            'num_curados':self.num_curados,
            'num_mortos':self.num_mortos}
        # NOTE: DataFrame.append was removed in pandas 2.0; switch to
        # pd.concat([...], ignore_index=True) if the project upgrades pandas.
        self.dataframe = self.dataframe.append(registro, ignore_index=True)
        print("num t1: ", num_tipo_1)
        print("num t2: ", num_tipo_2)
        print("num curados: ", self.num_curados)
        print("num mortos: ", self.num_mortos)
        print("---------")
        # snapshot the new status matrix and bump the update counter
        self.salvar_posicionamento()
        self.num_atualizacoes +=1

    def popular(self, tamanho_matriz):
        """Place the initial type-1 and type-2 infected at random distinct cells."""
        # All ordered index pairs (x, y) with x != y.
        # NOTE(review): permutations never yields diagonal cells (x == y), so
        # those cells can never receive an initial infected — confirm intended.
        permutacoes = permutations(list(range(tamanho_matriz)),2)
        lista_indices = list(permutacoes)
        random.shuffle(lista_indices)
        # First type-1 individual.
        # NOTE(review): created unconditionally, even when num_inicial_tipo1
        # is 0 — confirm intended.
        indice = lista_indices.pop()
        ind_x = indice[0]
        ind_y = indice[1]
        self.lista_infectados_tipo_1.append(self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(ind_x,ind_y)))
        self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_1
        self.matriz_localizacao[ind_x,ind_y] = 0
        # Remaining type-1 individuals.
        for i in range(1,self.num_inicial_tipo1):
            indice = lista_indices.pop()
            ind_x = indice[0]
            ind_y = indice[1]
            self.lista_infectados_tipo_1.append(self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(ind_x,ind_y)))
            self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_1
            self.matriz_localizacao[ind_x,ind_y] = len(self.lista_infectados_tipo_1) - 1
        # Type-2 individuals.
        for indice in range(self.num_inicial_tipo2):
            indice = lista_indices.pop()
            ind_x = indice[0]
            ind_y = indice[1]
            self.lista_infectados_tipo_2.append(self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_2,(ind_x,ind_y)))
            self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_2
            self.matriz_localizacao[ind_x,ind_y] = len(self.lista_infectados_tipo_2) - 1

    def trocar_status_localizacao(self,ponto_ini,ponto_final):
        """Swap the status and list-index entries of two grid cells."""
        x_ini = ponto_ini[0]
        y_ini = ponto_ini[1]
        x_fin = ponto_final[0]
        y_fin = ponto_final[1]
        aux1 = self.matriz_status[x_fin,y_fin]
        self.matriz_status[x_fin,y_fin] = self.matriz_status[x_ini,y_ini]
        self.matriz_status[x_ini,y_ini] = aux1
        aux2 = self.matriz_localizacao[x_fin,y_fin]
        self.matriz_localizacao[x_fin,y_fin] = self.matriz_localizacao[x_ini,y_ini]
        self.matriz_localizacao[x_ini,y_ini] = aux2

    def mover_infectante(self, infectante):
        """Move *infectante* one random step (up/down/left/right) on the grid,
        swapping cells with whichever individual occupies the destination."""
        pos_x, pos_y = infectante.posicao[0], infectante.posicao[1]
        # pick one of the four directions with equal probability
        rng_posicao = random.random()
        if rng_posicao <=0.25:
            # up
            pos_x -= 1
        elif rng_posicao <=0.5:
            # down
            pos_x += 1
        elif rng_posicao <=0.75:
            # left
            pos_y -= 1
        else:
            # right
            pos_y += 1
        # wrap the candidate position around the spherical grid
        novo_x, novo_y = self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
        # hand the vacated position to whoever currently occupies the destination
        status = self.matriz_status[novo_x, novo_y]
        if status == Individuo.INFECTADO_TIPO_1:
            individuo_ocupante_destino = self.matriz_localizacao[novo_x, novo_y]
            print(status)
            print(individuo_ocupante_destino)
            print(len(self.lista_infectados_tipo_1))
            self.lista_infectados_tipo_1[individuo_ocupante_destino].posicao = infectante.posicao
        elif status == Individuo.INFECTADO_TIPO_2:
            individuo_ocupante_destino = self.matriz_localizacao[novo_x, novo_y]
            print(status)
            print(individuo_ocupante_destino)
            print(len(self.lista_infectados_tipo_2))
            self.lista_infectados_tipo_2[individuo_ocupante_destino].posicao = infectante.posicao
        # NOTE(review): infectante.posicao is never updated to (novo_x, novo_y)
        # here — confirm whether Individuo tracks it elsewhere or this is a bug.
        self.trocar_status_localizacao(infectante.posicao,(novo_x, novo_y))
# Simulation parameters for this demo run.
chance_infeccao = 1             # a contagious individual always infects a healthy neighbour
chance_infeccao_tipo2 = 0.      # new infections are never contagious (type 2)
chance_morte = 0.2
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.01
percentual_inicial_tipo2 = 0.01
# Build a 10x10 simulation with the parameters above.
sim = Simulador(
    10,
    percentual_inicial_tipo1,
    percentual_inicial_tipo2,
    chance_infeccao,
    chance_infeccao_tipo2,
    chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
#print(sim.lista_infectados_tipo_2)
#print(sim.lista_infectados_tipo_1)
#cmap = ListedColormap(['w', 'y', 'yellow', 'red'])
# Disabled visualisation loop: step the simulation and plot each status matrix.
# plt.matshow(sim.lista_matrizes_posicionamento[0].toarray())#, cmap = cmap)
# sim.iterar()
# plt.matshow(sim.lista_matrizes_posicionamento[1].toarray())#, cmap = cmap)
# sim.iterar()
# plt.matshow(sim.lista_matrizes_posicionamento[2].toarray())#, cmap = cmap)
# sim.iterar()
# plt.matshow(sim.lista_matrizes_posicionamento[3].toarray())#, cmap = cmap)
# sim.iterar()
# plt.matshow(sim.lista_matrizes_posicionamento[4].toarray())#, cmap = cmap)
# sim.iterar()
# plt.matshow(sim.lista_matrizes_posicionamento[5].toarray())#, cmap = cmap)
# sim.iterar()
# plt.show();
| 41.710306
| 135
| 0.664018
|
acfd83d6e2fee29bddc9baf61931ceb9498a1db6
| 19,000
|
py
|
Python
|
discord/utils.py
|
sirmammingtonham/discord.py
|
4a23c713e9eae46fd351d3abb44d04d795c96e9f
|
[
"MIT"
] | null | null | null |
discord/utils.py
|
sirmammingtonham/discord.py
|
4a23c713e9eae46fd351d3abb44d04d795c96e9f
|
[
"MIT"
] | null | null | null |
discord/utils.py
|
sirmammingtonham/discord.py
|
4a23c713e9eae46fd351d3abb44d04d795c96e9f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2020 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import array
import asyncio
import collections.abc
import unicodedata
import datetime
import functools
import json
import re
import warnings
from base64 import b64encode
from bisect import bisect_left
from email.utils import parsedate_to_datetime
from inspect import isawaitable as _isawaitable
from collections import defaultdict
from operator import attrgetter
from .errors import InvalidArgument
from .object import Object
DISCORD_EPOCH = 1420070400000
MAX_ASYNCIO_SECONDS = 3456000
class cached_property:
    """Non-data descriptor that computes a value once per instance.

    On first access the wrapped function is invoked and its result is
    stored on the instance under the same name; since this descriptor
    defines no ``__set__``, the instance attribute shadows it on every
    later lookup.
    """

    def __init__(self, function):
        self.function = function
        self.__doc__ = getattr(function, '__doc__')

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        result = self.function(instance)
        # Cache on the instance so subsequent lookups bypass this descriptor.
        setattr(instance, self.function.__name__, result)
        return result
class CachedSlotProperty:
    """Descriptor caching a computed value into a named slot.

    Unlike :class:`cached_property`, the result is stored under an
    explicit attribute *name* (typically a ``__slots__`` entry), so it
    works on classes that have no instance ``__dict__``.
    """

    def __init__(self, name, function):
        self.name = name
        self.function = function
        self.__doc__ = getattr(function, '__doc__')

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        try:
            # Fast path: the slot is already filled.
            return getattr(instance, self.name)
        except AttributeError:
            computed = self.function(instance)
            setattr(instance, self.name, computed)
            return computed
def cached_slot_property(name):
    """Decorator factory: cache the decorated method's result in slot *name*."""
    def wrap(function):
        return CachedSlotProperty(name, function)
    return wrap
class SequenceProxy(collections.abc.Sequence):
    """Read-only proxy of a Sequence.

    Forwards every read operation to the wrapped sequence while exposing
    no mutating API of its own.
    """

    def __init__(self, proxied):
        self.__target = proxied

    def __getitem__(self, idx):
        return self.__target[idx]

    def __len__(self):
        return len(self.__target)

    def __contains__(self, item):
        return item in self.__target

    def __iter__(self):
        return iter(self.__target)

    def __reversed__(self):
        return reversed(self.__target)

    def index(self, value, *args, **kwargs):
        # Forward the optional start/stop bounds unchanged.
        return self.__target.index(value, *args, **kwargs)

    def count(self, value):
        return self.__target.count(value)
class Bidict(dict):
    """A bi-directional dict

    Every mapping ``k -> v`` is mirrored by ``v -> k`` in the same
    underlying dict, so lookups work in both directions. Mutators keep
    the two directions consistent; a self-mapping (``k == v``) occupies
    a single entry.
    """
    # Sentinel distinguishing "no default supplied" from an explicit None.
    _None = object()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mirror every k -> v with v -> k (built first, then merged in).
        super().update({v:k for k, v in self.items()})
    def __setitem__(self, key, value):
        # Delete related mappings
        # if we have 1 <-> 2 and we set 2 <-> 3, 2 is now unrelated to 1
        if key in self:
            del self[key]
        if value in self:
            del self[value]
        super().__setitem__(key, value)
        super().__setitem__(value, key)
    def __delitem__(self, key):
        # Remove both directions of the pair; a self-mapping (key == value)
        # has only one underlying entry, so delete just once.
        value = super().__getitem__(key)
        super().__delitem__(value)
        if key == value:
            return
        super().__delitem__(key)
    def to_dict(self):
        # Plain dict snapshot (still contains both directions).
        return super().copy()
    def pop(self, k, d=_None):
        try:
            v = super().pop(k)
            # Drop the reverse entry too (ignored if already absent).
            super().pop(v, d)
            return v
        except KeyError:
            if d is not self._None:
                return d
            raise
    def popitem(self):
        item = super().popitem()
        # Also remove the mirrored entry of the popped pair.
        super().__delitem__(item[1])
        return item
    def setdefault(self, k, d=None):
        try:
            return self[k]
        except KeyError:
            # NOTE(review): if d already participates in a pair, return it
            # without inserting — presumably to avoid breaking an existing
            # relation via __setitem__'s delete-related behavior; confirm.
            if d in self:
                return d
            self[k] = d
            return d
    def update(self, *args, **F):
        try:
            E = args[0]
            if callable(getattr(E, 'keys', None)):
                # Mapping-style positional argument.
                for k in E:
                    self[k] = E[k]
            else:
                # Iterable of (key, value) pairs.
                for k,v in E:
                    self[k] = v
        except IndexError:
            # No positional argument was given.
            pass
        finally:
            for k in F:
                self[k] = F[k]
    def copy(self):
        return self.__class__(super().copy())
    # incompatible
    # https://docs.python.org/3/library/exceptions.html#NotImplementedError, Note 1
    fromkeys = None
class Defaultdict(defaultdict):
    """``defaultdict`` variant whose factory receives the missing key."""

    def __missing__(self, key):
        factory = self.default_factory
        if factory is None:
            raise KeyError((key,))
        produced = factory(key)
        self[key] = produced
        return produced
def parse_time(timestamp):
    """Parse an ISO-8601-like UTC timestamp string into a naive datetime.

    Returns ``None`` for a falsy *timestamp*. A trailing ``+00:00`` offset
    is stripped; every remaining digit run becomes one datetime component.
    """
    if not timestamp:
        return None
    cleaned = timestamp.replace('+00:00', '')
    parts = map(int, re.split(r'[^\d]', cleaned))
    return datetime.datetime(*parts)
def deprecated(instead=None):
    """Decorator factory that emits a DeprecationWarning on every call.

    If *instead* is given, the warning names the suggested replacement.
    The wrapped function's behavior and metadata are otherwise unchanged.
    """
    def actual_decorator(func):
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            warnings.simplefilter('always', DeprecationWarning)  # turn off filter
            if instead:
                message = "{0.__name__} is deprecated, use {1} instead.".format(func, instead)
            else:
                message = '{0.__name__} is deprecated.'.format(func)
            # stacklevel=3 points the warning at the caller's call site.
            warnings.warn(message, stacklevel=3, category=DeprecationWarning)
            warnings.simplefilter('default', DeprecationWarning)  # reset filter
            return func(*args, **kwargs)
        return decorated
    return actual_decorator
def oauth_url(client_id, permissions=None, guild=None, redirect_uri=None):
    """Build the OAuth2 URL used to invite the bot into guilds.

    Parameters
    -----------
    client_id: :class:`str`
        The client ID for your bot.
    permissions: :class:`~discord.Permissions`
        The permissions you're requesting. Omitted from the URL when not
        given.
    guild: :class:`~discord.Guild`
        The guild to pre-select in the authorization screen, if available.
    redirect_uri: :class:`str`
        An optional valid redirect URI.

    Returns
    --------
    :class:`str`
        The OAuth2 URL for inviting the bot into guilds.
    """
    parts = ['https://discord.com/oauth2/authorize?client_id={}&scope=bot'.format(client_id)]
    if permissions is not None:
        parts.append('&permissions=' + str(permissions.value))
    if guild is not None:
        parts.append("&guild_id=" + str(guild.id))
    if redirect_uri is not None:
        from urllib.parse import urlencode
        parts.append("&response_type=code&" + urlencode({'redirect_uri': redirect_uri}))
    return ''.join(parts)
def snowflake_time(id):
    """Return the UTC creation time encoded in a Discord snowflake.

    Parameters
    -----------
    id: :class:`int`
        The snowflake ID.

    Returns
    --------
    :class:`datetime.datetime`
        The creation date in UTC of a Discord snowflake ID.
    """
    # The top 42 bits are milliseconds since the Discord epoch.
    millis = (id >> 22) + DISCORD_EPOCH
    return datetime.datetime.utcfromtimestamp(millis / 1000)
def time_snowflake(datetime_obj, high=False):
    """Return a synthetic snowflake whose timestamp matches *datetime_obj*.

    When using as the lower end of a range, use ``time_snowflake(high=False) - 1``
    to be inclusive, ``high=True`` to be exclusive.
    When using as the higher end of a range, use ``time_snowflake(high=True) + 1``
    to be inclusive, ``high=False`` to be exclusive.

    Parameters
    -----------
    datetime_obj: :class:`datetime.datetime`
        A timezone-naive datetime object representing UTC time.
    high: :class:`bool`
        Whether or not to set the lower 22 bit to high or low.
    """
    epoch = type(datetime_obj)(1970, 1, 1)
    since_epoch = (datetime_obj - epoch).total_seconds()
    discord_millis = int(since_epoch * 1000 - DISCORD_EPOCH)
    # Fill the worker/process/increment bits with all ones or all zeros.
    low_bits = (1 << 22) - 1 if high else 0
    return (discord_millis << 22) + low_bits
def find(predicate, seq):
    """Return the first element of ``seq`` for which ``predicate`` is truthy.

    For example: ::

        member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)

    would find the first :class:`~discord.Member` whose name is 'Mighty' and
    return it. If an entry is not found, then ``None`` is returned.

    This is different from :func:`py:filter` due to the fact it stops the
    moment it finds a valid entry.

    Parameters
    -----------
    predicate
        A function that returns a boolean-like result.
    seq: iterable
        The iterable to search through.
    """
    # next() with a default short-circuits on the first match, like the
    # original explicit loop.
    return next((item for item in seq if predicate(item)), None)
def get(iterable, **attrs):
    r"""Return the first element in ``iterable`` that meets all traits in ``attrs``.

    This is an alternative for :func:`~discord.utils.find`. When multiple
    attributes are specified, they are checked using logical AND, not logical
    OR — an element has to meet every attribute passed in, not just one.

    To have a nested attribute search (i.e. search by ``x.y``) then pass in
    ``x__y`` as the keyword argument.

    If nothing is found that matches the attributes passed, then ``None`` is
    returned.

    Examples
    ---------
    Basic usage:

    .. code-block:: python3

        member = discord.utils.get(message.guild.members, name='Foo')

    Multiple attribute matching:

    .. code-block:: python3

        channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)

    Nested attribute matching:

    .. code-block:: python3

        channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')

    Parameters
    -----------
    iterable
        An iterable to search through.
    \*\*attrs
        Keyword arguments that denote attributes to search with.
    """
    # Fast path: a single attribute needs only one getter and one comparison
    # per element.
    if len(attrs) == 1:
        name, expected = attrs.popitem()
        getter = attrgetter(name.replace('__', '.'))
        return next((elem for elem in iterable if getter(elem) == expected), None)

    # General path: pre-build (getter, expected) pairs once, then test each
    # element against all of them.
    checks = [
        (attrgetter(name.replace('__', '.')), expected)
        for name, expected in attrs.items()
    ]
    for candidate in iterable:
        if all(getter(candidate) == expected for getter, expected in checks):
            return candidate
    return None
def _unique(iterable):
seen = set()
adder = seen.add
return [x for x in iterable if not (x in seen or adder(x))]
def _get_as_snowflake(data, key):
try:
value = data[key]
except KeyError:
return None
else:
return value and int(value)
def _get_mime_type_for_image(data):
if data.startswith(b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'):
return 'image/png'
elif data[0:3] == b'\xff\xd8\xff' or data[6:10] in (b'JFIF', b'Exif'):
return 'image/jpeg'
elif data.startswith((b'\x47\x49\x46\x38\x37\x61', b'\x47\x49\x46\x38\x39\x61')):
return 'image/gif'
elif data.startswith(b'RIFF') and data[8:12] == b'WEBP':
return 'image/webp'
else:
raise InvalidArgument('Unsupported image type given')
def _bytes_to_base64_data(data):
    """Wrap raw image bytes into a ``data:`` URI with the sniffed MIME type."""
    mime = _get_mime_type_for_image(data)
    encoded = b64encode(data).decode('ascii')
    return 'data:{mime};base64,{data}'.format(mime=mime, data=encoded)
def to_json(obj):
    """Serialize *obj* to compact, ASCII-safe JSON (no spaces after separators)."""
    return json.dumps(obj, ensure_ascii=True, separators=(',', ':'))
def _parse_ratelimit_header(request, *, use_clock=False):
reset_after = request.headers.get('X-Ratelimit-Reset-After')
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers['X-Ratelimit-Reset']), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
async def maybe_coroutine(f, *args, **kwargs):
    """Call *f* and transparently await the result if it is awaitable."""
    result = f(*args, **kwargs)
    return await result if _isawaitable(result) else result
async def async_all(gen, *, check=_isawaitable):
    """Like :func:`all`, but awaits any awaitable elements before testing
    truthiness; short-circuits on the first falsy value."""
    for item in gen:
        if check(item):
            item = await item
        if not item:
            return False
    return True
async def sane_wait_for(futures, *, timeout):
    """Await all *futures* with a timeout, raising instead of returning partial
    results.

    :func:`asyncio.wait` never raises on timeout; this converts any leftover
    pending tasks into an :exc:`asyncio.TimeoutError`.
    """
    tasks = [asyncio.ensure_future(fut) for fut in futures]
    done, pending = await asyncio.wait(tasks, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
    if pending:
        raise asyncio.TimeoutError()
    return done
async def sleep_until(when, result=None):
    """|coro|

    Sleep until a specified time.

    If the time supplied is in the past this function will yield instantly.

    .. versionadded:: 1.3

    Parameters
    -----------
    when: :class:`datetime.datetime`
        The timestamp in which to sleep until. If the datetime is naive then
        it is assumed to be in UTC.
    result: Any
        If provided is returned to the caller when the coroutine completes.
    """
    if when.tzinfo is None:
        when = when.replace(tzinfo=datetime.timezone.utc)
    now = datetime.datetime.now(datetime.timezone.utc)
    remaining = (when - now).total_seconds()
    # asyncio.sleep cannot take arbitrarily large delays, so chunk long waits.
    while remaining > MAX_ASYNCIO_SECONDS:
        await asyncio.sleep(MAX_ASYNCIO_SECONDS)
        remaining -= MAX_ASYNCIO_SECONDS
    return await asyncio.sleep(max(remaining, 0), result)
def valid_icon_size(size):
    """Icons must be power of 2 within [16, 4096]."""
    # x & (x - 1) == 0 exactly when x has at most one bit set.
    is_power_of_two = size & (size - 1) == 0
    return is_power_of_two and 16 <= size <= 4096
class SnowflakeList(array.array):
    """Internal data storage class to efficiently store a list of snowflakes.

    This should have the following characteristics:

    - Low memory usage
    - O(n) iteration (obviously)
    - O(n log n) initial creation if data is unsorted
    - O(log n) search and indexing
    - O(n) insertion
    """

    __slots__ = ()

    def __new__(cls, data, *, is_sorted=False):
        # 'Q' stores unsigned 64-bit integers, matching snowflake width.
        ordered = data if is_sorted else sorted(data)
        return array.array.__new__(cls, 'Q', ordered)

    def add(self, element):
        # Insert at the bisection point to keep the array sorted.
        self.insert(bisect_left(self, element), element)

    def get(self, element):
        # Binary search; returns the stored value or None when absent.
        idx = bisect_left(self, element)
        if idx != len(self) and self[idx] == element:
            return self[idx]
        return None

    def has(self, element):
        # Binary-search membership test.
        idx = bisect_left(self, element)
        return idx != len(self) and self[idx] == element
_IS_ASCII = re.compile(r'^[\x00-\x7f]+$')
def _string_width(string, *, _IS_ASCII=_IS_ASCII):
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = 'WFA'
width = 0
func = unicodedata.east_asian_width
for char in string:
width += 2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1
return width
def resolve_invite(invite):
    """
    Resolves an invite from a :class:`~discord.Invite`, URL or code.

    Parameters
    -----------
    invite: Union[:class:`~discord.Invite`, :class:`str`]
        The invite.

    Returns
    --------
    :class:`str`
        The invite code.
    """
    from .invite import Invite  # circular import
    if isinstance(invite, Invite):
        return invite.code
    # Strip a discord.gg / discord(app).com/invite URL down to its code;
    # anything that doesn't look like a URL is assumed to already be a code.
    pattern = r'(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)'
    found = re.match(pattern, invite)
    return found.group(1) if found else invite
def resolve_template(code):
    """
    Resolves a template code from a :class:`~discord.Template`, URL or code.

    .. versionadded:: 1.4

    Parameters
    -----------
    code: Union[:class:`~discord.Template`, :class:`str`]
        The code.

    Returns
    --------
    :class:`str`
        The template code.
    """
    from .template import Template  # circular import
    if isinstance(code, Template):
        return code.code
    # Strip a discord.new / discord(app).com/template URL down to its code;
    # non-URL input is assumed to already be a code.
    pattern = r'(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)'
    found = re.match(pattern, code)
    return found.group(1) if found else code
# One alternative per markdown character (*, `, _, ~, |): the lookahead only
# matches when a closing occurrence of the same character appears later, so a
# lone character is left untouched.
_MARKDOWN_ESCAPE_SUBREGEX = '|'.join(r'\{0}(?=([\s\S]*((?<!\{0})\{0})))'.format(c)
                                     for c in ('*', '`', '_', '~', '|'))
# Block quotes (">" / ">>>") at line start and masked links "[text](url)".
_MARKDOWN_ESCAPE_COMMON = r'^>(?:>>)?\s|\[.+\]\(.+\)'
# Combined pattern used by escape_markdown when as_needed=True.
_MARKDOWN_ESCAPE_REGEX = re.compile(r'(?P<markdown>%s|%s)' % (_MARKDOWN_ESCAPE_SUBREGEX, _MARKDOWN_ESCAPE_COMMON))
def escape_markdown(text, *, as_needed=False, ignore_links=True):
    r"""A helper function that escapes Discord's markdown.

    Parameters
    -----------
    text: :class:`str`
        The text to escape markdown from.
    as_needed: :class:`bool`
        Whether to escape the markdown characters as needed. This
        means that it does not escape extraneous characters if it's
        not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
        instead of ``\*\*hello\*\*``. Note however that this can open
        you up to some clever syntax abuse. Defaults to ``False``.
    ignore_links: :class:`bool`
        Whether to leave links alone when escaping markdown. For example,
        if a URL in the text contains characters such as ``_`` then it will
        be left alone. This option is not supported with ``as_needed``.
        Defaults to ``True``.

    Returns
    --------
    :class:`str`
        The text with the markdown special characters escaped with a slash.
    """
    if not as_needed:
        # Matches <uri> style links and bare http(s)/steam URLs; matched URLs
        # are returned verbatim by the replacement so their contents stay
        # unescaped when ignore_links is True.
        url_regex = r'(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])'
        def replacement(match):
            groupdict = match.groupdict()
            is_url = groupdict.get('url')
            if is_url:
                # Leave the URL untouched.
                return is_url
            return '\\' + groupdict['markdown']
        # Escape every markdown character plus block quotes / masked links.
        regex = r'(?P<markdown>[_\\~|\*`]|%s)' % _MARKDOWN_ESCAPE_COMMON
        if ignore_links:
            # URL alternative first so it wins over the markdown alternative.
            regex = '(?:%s|%s)' % (url_regex, regex)
        return re.sub(regex, replacement, text)
    else:
        # as_needed path: escape backslashes first, then only markdown
        # characters that would actually pair up (see _MARKDOWN_ESCAPE_REGEX).
        text = re.sub(r'\\', r'\\\\', text)
        return _MARKDOWN_ESCAPE_REGEX.sub(r'\\\1', text)
def escape_mentions(text):
    """A helper function that escapes everyone, here, role, and user mentions.

    .. note::

        This does not include channel mentions.

    Parameters
    -----------
    text: :class:`str`
        The text to escape mentions from.

    Returns
    --------
    :class:`str`
        The text with the mentions removed.
    """
    # Insert a zero-width space after '@' so the client no longer parses the
    # token as a mention.
    mention_pattern = r'@(everyone|here|[!&]?[0-9]{17,21})'
    return re.sub(mention_pattern, '@\u200b\\1', text)
| 29.874214
| 130
| 0.619789
|
acfd870d6269ee9976f9cb282e69a0655e79c4ff
| 304
|
py
|
Python
|
netmiko/brocade/__init__.py
|
mostau1/netmiko
|
5b5463fb01e39e771be553281748477a48c7391c
|
[
"MIT"
] | null | null | null |
netmiko/brocade/__init__.py
|
mostau1/netmiko
|
5b5463fb01e39e771be553281748477a48c7391c
|
[
"MIT"
] | 8
|
2020-02-05T14:17:32.000Z
|
2021-09-23T23:27:46.000Z
|
netmiko/brocade/__init__.py
|
mostau1/netmiko
|
5b5463fb01e39e771be553281748477a48c7391c
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from netmiko.brocade.brocade_nos_ssh import BrocadeNosSSH
from netmiko.brocade.brocade_fastiron_ssh import BrocadeFastironSSH
from netmiko.brocade.brocade_netiron_ssh import BrocadeNetironSSH
__all__ = ['BrocadeNosSSH', 'BrocadeFastironSSH', 'BrocadeNetironSSH']
| 43.428571
| 70
| 0.871711
|
acfd873e49e604b9b28876c9446bad48e9ae45c0
| 5,614
|
py
|
Python
|
samples/basic/crud/models/cisco-ios-xr/Cisco-IOS-XR-clns-isis-cfg/nc-create-xr-clns-isis-cfg-54-ydk.py
|
maccioni/ydk-py-samples
|
d1758694bef97327c5477e65649326c7595ce499
|
[
"Apache-2.0"
] | 1
|
2021-07-08T14:02:12.000Z
|
2021-07-08T14:02:12.000Z
|
samples/basic/crud/models/cisco-ios-xr/Cisco-IOS-XR-clns-isis-cfg/nc-create-xr-clns-isis-cfg-54-ydk.py
|
maccioni/ydk-py-samples
|
d1758694bef97327c5477e65649326c7595ce499
|
[
"Apache-2.0"
] | null | null | null |
samples/basic/crud/models/cisco-ios-xr/Cisco-IOS-XR-clns-isis-cfg/nc-create-xr-clns-isis-cfg-54-ydk.py
|
maccioni/ydk-py-samples
|
d1758694bef97327c5477e65649326c7595ce499
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model Cisco-IOS-XR-clns-isis-cfg.
usage: nc-create-xr-clns-isis-cfg-54-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_clns_isis_cfg \
as xr_clns_isis_cfg
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_clns_isis_datatypes \
as xr_clns_isis_datatypes
from ydk.types import Empty
import logging
def config_isis(isis):
    """Add config data to isis object.

    Builds a single level-1 IS-IS instance named "DEFAULT" with:
    an IPv4 unicast address family using new-style metrics and
    LDP-preferred segment routing; a passive Loopback0 interface
    carrying an absolute prefix SID of 16041; and a point-to-point
    GigabitEthernet0/0/0/0 interface.
    """
    # global configuration
    instance = isis.instances.Instance()
    instance.instance_name = "DEFAULT"
    instance.running = Empty()
    instance.is_type = xr_clns_isis_cfg.IsisConfigurableLevels.level1
    net = instance.nets.Net()
    net.net_name = "49.0000.1720.1625.5001.00"
    instance.nets.net.append(net)
    isis.instances.instance.append(instance)
    # global address family (IPv4 unicast)
    af = instance.afs.Af()
    af.af_name = xr_clns_isis_datatypes.IsisAddressFamily.ipv4
    af.saf_name = xr_clns_isis_datatypes.IsisSubAddressFamily.unicast
    af.af_data = af.AfData()
    # new-style (wide) metrics, applied regardless of level, no transition
    metric_style = af.af_data.metric_styles.MetricStyle()
    metric_style.style = xr_clns_isis_cfg.IsisMetricStyle.new_metric_style
    metric_style.level = xr_clns_isis_datatypes.IsisInternalLevel.not_set
    transition_state = xr_clns_isis_cfg.IsisMetricStyleTransition.disabled
    metric_style.transition_state = transition_state
    af.af_data.metric_styles.metric_style.append(metric_style)
    # segment routing (prefer LDP labels)
    mpls = xr_clns_isis_cfg.IsisLabelPreference.ldp
    af.af_data.segment_routing.mpls = mpls
    instance.afs.af.append(af)
    # loopback interface (passive: advertised but no adjacencies formed)
    interface = instance.interfaces.Interface()
    interface.interface_name = "Loopback0"
    interface.running = Empty()
    interface.state = xr_clns_isis_cfg.IsisInterfaceState.passive
    # interface address family
    interface_af = interface.interface_afs.InterfaceAf()
    interface_af.af_name = xr_clns_isis_datatypes.IsisAddressFamily.ipv4
    interface_af.saf_name = xr_clns_isis_datatypes.IsisSubAddressFamily.unicast
    interface_af.interface_af_data.running = Empty()
    interface.interface_afs.interface_af.append(interface_af)
    # segment routing: absolute prefix SID with PHP enabled
    prefix_sid = interface_af.interface_af_data.PrefixSid()
    prefix_sid.type = xr_clns_isis_cfg.Isissid.absolute
    prefix_sid.value = 16041
    prefix_sid.php = xr_clns_isis_cfg.IsisphpFlag.enable
    explicit_null = xr_clns_isis_cfg.IsisexplicitNullFlag.disable
    prefix_sid.explicit_null = explicit_null
    prefix_sid.nflag_clear = xr_clns_isis_cfg.NflagClear.disable
    interface_af.interface_af_data.prefix_sid = prefix_sid
    instance.interfaces.interface.append(interface)
    # gi0/0/0/0 interface (point-to-point link)
    interface = instance.interfaces.Interface()
    interface.interface_name = "GigabitEthernet0/0/0/0"
    interface.running = Empty()
    interface.point_to_point = Empty()
    # interface address family
    interface_af = interface.interface_afs.InterfaceAf()
    interface_af.af_name = xr_clns_isis_datatypes.IsisAddressFamily.ipv4
    interface_af.saf_name = xr_clns_isis_datatypes.IsisSubAddressFamily.unicast
    interface_af.interface_af_data.running = Empty()
    interface.interface_afs.interface_af.append(interface_af)
    instance.interfaces.interface.append(interface)
if __name__ == "__main__":
    """Execute main program."""
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", help="print debugging messages",
                        action="store_true")
    parser.add_argument("device",
                        help="NETCONF device (ssh://user:password@host:port)")
    args = parser.parse_args()
    # Split the ssh://user:password@host:port URL into its components.
    device = urlparse(args.device)
    # log debug messages if verbose argument specified
    if args.verbose:
        logger = logging.getLogger("ydk")
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                       "%(levelname)s - %(message)s"))
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # create NETCONF provider (connects to the device over SSH)
    provider = NetconfServiceProvider(address=device.hostname,
                                      port=device.port,
                                      username=device.username,
                                      password=device.password,
                                      protocol=device.scheme)
    # create CRUD service
    crud = CRUDService()
    isis = xr_clns_isis_cfg.Isis()  # create object
    config_isis(isis)  # add object configuration
    # create configuration on NETCONF device
    crud.create(provider, isis)
    exit()
# End of script
# End of script
| 38.986111
| 79
| 0.723905
|
acfd8761a83b021585f391bc0cdb123880b135f3
| 1,700
|
py
|
Python
|
mylocation/migrations/0005_auto_20160216_1419.py
|
chadgates/locmaster
|
2a607341d3444996e24b048bfb5245174aedd45d
|
[
"BSD-3-Clause"
] | null | null | null |
mylocation/migrations/0005_auto_20160216_1419.py
|
chadgates/locmaster
|
2a607341d3444996e24b048bfb5245174aedd45d
|
[
"BSD-3-Clause"
] | null | null | null |
mylocation/migrations/0005_auto_20160216_1419.py
|
chadgates/locmaster
|
2a607341d3444996e24b048bfb5245174aedd45d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-16 14:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.1 (see header comment). Renames the
    # WorldBorder geometry field from 'geom' to 'mpoly' and re-declares
    # several fields with verbose_name metadata.
    # NOTE(review): the AlterField operations appear to only adjust
    # verbose_name — confirm against the prior model state before editing.

    dependencies = [
        ('mylocation', '0004_delete_portterminal'),
    ]

    operations = [
        migrations.RenameField(
            model_name='worldborder',
            old_name='geom',
            new_name='mpoly',
        ),
        migrations.AlterField(
            model_name='worldborder',
            name='fips',
            field=models.CharField(max_length=2, verbose_name='FIPS Code'),
        ),
        migrations.AlterField(
            model_name='worldborder',
            name='iso2',
            field=models.CharField(max_length=2, verbose_name='2 Digit ISO'),
        ),
        migrations.AlterField(
            model_name='worldborder',
            name='iso3',
            field=models.CharField(max_length=3, verbose_name='3 Digit ISO'),
        ),
        migrations.AlterField(
            model_name='worldborder',
            name='pop2005',
            field=models.IntegerField(verbose_name='Population 2005'),
        ),
        migrations.AlterField(
            model_name='worldborder',
            name='region',
            field=models.IntegerField(verbose_name='Region Code'),
        ),
        migrations.AlterField(
            model_name='worldborder',
            name='subregion',
            field=models.IntegerField(verbose_name='Sub-Region Code'),
        ),
        migrations.AlterField(
            model_name='worldborder',
            name='un',
            field=models.IntegerField(verbose_name='United Nations Code'),
        ),
    ]
| 30.357143
| 77
| 0.570588
|
acfd8797ebbd165e9a2b4d1e3746008c70480a5d
| 77,862
|
py
|
Python
|
gpMgmt/bin/gppylib/operations/test/unit/test_unit_backup_utils.py
|
henglabs/gpdb
|
09a8cc05ac90d63c64c6d432ca35179b55a461b2
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/operations/test/unit/test_unit_backup_utils.py
|
henglabs/gpdb
|
09a8cc05ac90d63c64c6d432ca35179b55a461b2
|
[
"PostgreSQL",
"Apache-2.0"
] | 6
|
2018-08-04T07:51:37.000Z
|
2018-11-26T07:09:44.000Z
|
gpMgmt/bin/gppylib/operations/test/unit/test_unit_backup_utils.py
|
henglabs/gpdb
|
09a8cc05ac90d63c64c6d432ca35179b55a461b2
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
#
# Copyright (c) Greenplum Inc 2016. All Rights Reserved.
#
import os
import shutil
import unittest
from gppylib.commands.base import CommandResult
from gppylib.gparray import GpArray, GpDB
from gppylib.operations.backup_utils import *
from . import setup_fake_gparray
from mock import patch, Mock
from test.unit.gp_unittest import GpTestCase
class BackupUtilsTestCase(GpTestCase):
def setUp(self):
with patch('gppylib.gparray.GpArray.initFromCatalog', return_value=setup_fake_gparray()):
self.context = Context()
self.context.master_datadir = '/data/master'
self.context.backup_dir = None
self.context.timestamp = '20160101010101'
self.netbackup_filepath = "/tmp/db_dumps/foo"
def test_generate_filename_dump_master_old_format(self):
expected_output = '/data/master/db_dumps/20160101/gp_dump_1_1_20160101010101.gz'
output = self.context.generate_filename("dump", use_old_format=True)
self.assertEquals(output, expected_output)
def test_generate_filename_dump_segment_old_format(self):
dbid = 3
expected_output = '/data/primary1/db_dumps/20160101/gp_dump_0_3_20160101010101.gz'
output = self.context.generate_filename("dump", dbid=dbid, use_old_format=True)
self.assertEquals(output, expected_output)
def test_generate_filename_dump_master_new_format(self):
expected_output = '/data/master/db_dumps/20160101/gp_dump_-1_1_20160101010101.gz'
output = self.context.generate_filename("dump")
self.assertEquals(output, expected_output)
def test_generate_filename_dump_segment_new_format(self):
dbid = 3
expected_output = '/data/primary1/db_dumps/20160101/gp_dump_1_3_20160101010101.gz'
output = self.context.generate_filename("dump", dbid=dbid)
self.assertEquals(output, expected_output)
def test_generate_filename_content_master_new_format_no_standby(self):
expected_output = '/data/master/db_dumps/20160101/gp_dump_-1_\d+_20160101010101.gz'
output = self.context.generate_filename("dump", content=-1)
self.assertEquals(output, expected_output)
def test_generate_filename_content_segment_new_format_no_mirror(self):
content = 1
expected_output = '/data/master/db_dumps/20160101/gp_dump_1_\d+_20160101010101.gz'
del self.context.content_map[5]
output = self.context.generate_filename("dump", content=content)
self.assertEquals(output, expected_output)
def test_generate_filename_content_master_new_format_with_standby(self):
expected_output = '/data/master/db_dumps/20160101/gp_dump_-1_\d+_20160101010101.gz'
self.context.content_map[10] = -1
output = self.context.generate_filename("dump", content=-1)
self.assertEquals(output, expected_output)
def test_generate_filename_content_segment_new_format_with_mirror(self):
content = 1
expected_output = '/data/master/db_dumps/20160101/gp_dump_1_\d+_20160101010101.gz'
output = self.context.generate_filename("dump", content=content)
self.assertEquals(output, expected_output)
def test_generate_filename_content_master_old_format_no_standby(self):
expected_output = '/data/master/db_dumps/20160101/gp_dump_1_(1)_20160101010101.gz'
self.context.use_old_filename_format = True
output = self.context.generate_filename("dump", content=-1)
self.assertEquals(output, expected_output)
def test_generate_filename_content_segment_old_format_no_mirror(self):
content = 1
expected_output = '/data/master/db_dumps/20160101/gp_dump_0_(3)_20160101010101.gz'
self.context.use_old_filename_format = True
del self.context.content_map[5]
output = self.context.generate_filename("dump", content=content)
self.assertEquals(output, expected_output)
def test_generate_filename_content_master_old_format_with_standby(self):
expected_output = '/data/master/db_dumps/20160101/gp_dump_1_(1|10)_20160101010101.gz'
self.context.use_old_filename_format = True
self.context.content_map[10] = -1
output = self.context.generate_filename("dump", content=-1)
self.assertEquals(output, expected_output)
def test_generate_filename_content_segment_old_format_with_mirror(self):
content = 1
expected_output = '/data/master/db_dumps/20160101/gp_dump_0_(3|5)_20160101010101.gz'
self.context.use_old_filename_format = True
output = self.context.generate_filename("dump", content=content)
self.assertEquals(output, expected_output)
def test_generate_filename_different_backup_dir(self):
self.context.backup_dir = '/data/masterdomain'
expected_output = '/data/masterdomain/db_dumps/20160101/gp_dump_20160101010101_schema'
output = self.context.generate_filename("schema")
self.assertEquals(output, expected_output)
def test_generate_filename_no_mdd(self):
self.context.master_datadir = None
self.context.backup_dir = '/data/masterdomain'
expected_output = '/data/masterdomain/db_dumps/20160101/gp_dump_20160101010101_schema'
output = self.context.generate_filename("schema")
self.assertEquals(output, expected_output)
def test_generate_filename_no_mdd_or_backup_dir(self):
self.context.master_datadir = None
with self.assertRaisesRegexp(Exception, 'Cannot locate backup directory with existing parameters'):
self.context.generate_filename("schema")
def test_generate_filename_no_timestamp(self):
self.context.timestamp = None
with self.assertRaisesRegexp(Exception, 'Cannot locate backup directory without timestamp'):
self.context.generate_filename("schema")
def test_generate_filename_bad_timestamp(self):
self.context.timestamp = 'xx160101010101'
with self.assertRaisesRegexp(Exception, 'Invalid timestamp'):
self.context.generate_filename("schema")
def test_generate_filename_short_timestamp(self):
self.context.timestamp = '2016'
with self.assertRaisesRegexp(Exception, 'Invalid timestamp'):
self.context.generate_filename("schema")
def test_validate_timestamp_default(self):
ts = "20160101010101"
result = validate_timestamp(ts)
self.assertTrue(result)
def test_validate_timestamp_too_short(self):
ts = "2016010101010"
result = validate_timestamp(ts)
self.assertFalse(result)
def test_validate_timestamp_too_long(self):
ts = "201601010101010"
result = validate_timestamp(ts)
self.assertFalse(result)
def test_validate_timestamp_zero(self):
ts = "00000000000000"
result = validate_timestamp(ts)
self.assertTrue(result)
def test_validate_timestamp_hex(self):
ts = "0a000000000000"
result = validate_timestamp(ts)
self.assertFalse(result)
def test_validate_timestamp_leading_space(self):
ts = " 00000000000000"
result = validate_timestamp(ts)
self.assertFalse(result)
def test_validate_timestamp_trailing_space(self):
ts = "00000000000000 "
result = validate_timestamp(ts)
self.assertFalse(result);
def test_validate_timestamp_none(self):
ts = None
result = validate_timestamp(ts)
self.assertFalse(result)
def test_generate_filename_with_timestamp(self):
ts = '20150101010101'
expected_output = '/data/master/db_dumps/20150101/gp_dump_20150101010101_increments'
output = self.context.generate_filename("increments", timestamp=ts)
self.assertEquals(output, expected_output)
def test_generate_filename_with_ddboost(self):
self.context.ddboost = True
self.context.backup_dir = "/tmp"
expected_output = '/data/master/db_dumps/20160101/gp_dump_20160101010101_increments'
output = self.context.generate_filename("increments")
self.assertEquals(output, expected_output)
@patch('os.path.exists', side_effect=[True])
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['/tmp/db_dumps/20160101'])
@patch('gppylib.operations.backup_utils.get_lines_from_file',
return_value=['BackupFile /data/master/db_dumps/20160101/gp_dump_1_1_20160101010101.gz: Succeeded'])
def test_is_timestamp_in_old_format_old(self, mock1, mock2, mock3):
self.assertTrue(self.context.is_timestamp_in_old_format())
@patch('os.path.exists', side_effect=[True])
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['/tmp/db_dumps/20160101'])
@patch('gppylib.operations.backup_utils.get_lines_from_file',
return_value=['BackupFile /data/master/db_dumps/20160101/gp_dump_-1_1_20160101010101.gz: Succeeded'])
def test_is_timestamp_in_old_format_new(self, mock1, mock2, mock3):
self.assertFalse(self.context.is_timestamp_in_old_format())
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['/tmp/db_dumps/20160101'])
@patch('gppylib.operations.backup_utils.get_lines_from_file',
return_value=['BackupFile /data/master/db_dumps/20160101/gp_dump_1_1_20160101010101.gz: Succeeded'])
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
def test_is_timestamp_in_old_format_old_nbu(self, mock1, mock2, mock3):
self.context.netbackup_service_host = "netbackup-service"
self.assertTrue(self.context.is_timestamp_in_old_format())
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['/tmp/db_dumps/20160101'])
@patch('gppylib.operations.backup_utils.get_lines_from_file',
return_value=['BackupFile /data/master/db_dumps/20160101/gp_dump_-1_1_20160101010101.gz: Succeeded'])
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
def test_is_timestamp_in_old_format_new_nbu(self, mock1, mock2, mock3):
self.context.netbackup_service_host = "netbackup-service"
self.assertFalse(self.context.is_timestamp_in_old_format())
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=[])
def test_is_timestamp_in_old_format_no_dump_dirs(self, mock1):
with self.assertRaisesRegexp(Exception, "Unable to locate report file for timestamp"):
self.context.is_timestamp_in_old_format()
@patch('os.path.exists', side_effect=[False])
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['/tmp/db_dumps/20160101'])
def test_is_timestamp_in_old_format_no_report_file(self, mock1, mock2):
with self.assertRaisesRegexp(Exception, "Unable to locate report file for timestamp"):
self.context.is_timestamp_in_old_format()
@patch('os.path.exists', side_effect=[True])
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['/tmp/db_dumps/20160101'])
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=[''])
def test_is_timestamp_in_old_format_empty_report_file(self, mock1, mock2, mock3):
self.assertFalse(self.context.is_timestamp_in_old_format())
@patch('os.path.exists', side_effect=[True])
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['/tmp/db_dumps/20160101'])
@patch('gppylib.operations.backup_utils.get_lines_from_file',
return_value=['BackupFile /backup/DCA/20160101/gp_dump_1_1_20160101010101.gz: Succeeded'])
def test_is_timestamp_in_old_format_wrong_path(self, mock1, mock2, mock3):
self.assertTrue(self.context.is_timestamp_in_old_format())
@patch('glob.glob', return_value=['/data/master/db_dumps/20160101/gp_dump_1_1_20160101010102.gz','/data/master/db_dumps/20160101/gp_dump_1_1_20160101010101.gz'])
def test_get_filename_for_content_old_format_master_exists(self, mock1):
self.context.use_old_filename_format=True
filename = get_filename_for_content(self.context, "metadata", -1)
self.assertEquals(filename, '/data/master/db_dumps/20160101/gp_dump_1_1_20160101010101.gz')
@patch('glob.glob', return_value=['/data/master/db_dumps/20160101/gp_dump_-1_1_20160101010102.gz','/data/master/db_dumps/20160101/gp_dump_-1_1_20160101010101.gz'])
def test_get_filename_for_content_new_format_master_exists(self, mock1):
self.context.use_old_filename_format=False
filename = get_filename_for_content(self.context, "metadata", -1)
self.assertEquals(filename, '/data/master/db_dumps/20160101/gp_dump_-1_1_20160101010101.gz')
@patch('glob.glob', return_value=['/data/master/db_dumps/20160101/gp_dump_1_1_20160101010102.gz','/data/master/db_dumps/20160101/gp_dump_0_12_20160101010101.gz'])
def test_get_filename_for_content_old_format_two_digit_dbid(self, mock1):
self.context.use_old_filename_format=True
self.context.content_map[12] = 2
filename = get_filename_for_content(self.context, "dump", 2)
self.assertEquals(filename, '/data/master/db_dumps/20160101/gp_dump_0_12_20160101010101.gz')
@patch('glob.glob', return_value=['/data/master/db_dumps/20160101/gp_dump_-1_1_20160101010102.gz','/data/master/db_dumps/20160101/gp_dump_2_12_20160101010101.gz'])
def test_get_filename_for_content_new_format_two_digit_dbid(self, mock1):
self.context.use_old_filename_format=False
filename = get_filename_for_content(self.context, "dump", 2)
self.assertEquals(filename, '/data/master/db_dumps/20160101/gp_dump_2_12_20160101010101.gz')
@patch('glob.glob', return_value=[])
def test_get_filename_for_content_master_doesnt_exist(self, mock1):
filename = get_filename_for_content(self.context, "metadata", -1)
self.assertEquals(filename, None)
@patch('gppylib.operations.backup_utils.Command.run')
def test_get_filename_for_content_segment_exists(self, mock1):
cmd_mock = Mock()
cmd_mock.rc = 0
cmd_mock.stdout = "['/data/segment/db_dumps/20160101/gp_dump_1_3_20160101010101.gz']"
cmd = Mock()
with patch('gppylib.operations.dump.Command.get_results', return_value=cmd_mock):
filename = get_filename_for_content(self.context, "metadata", 1, '/data/segment/db_dumps/20160101', 'remoteHost')
self.assertEquals(filename, '/data/segment/db_dumps/20160101/gp_dump_1_3_20160101010101.gz')
@patch('gppylib.operations.backup_utils.Command.run')
def test_get_filename_for_content_segment_doesnt_exist(self, mock1):
cmd_mock = Mock()
cmd_mock.rc = 1
cmd_mock.stdout = ''
cmd = Mock()
with patch('gppylib.operations.dump.Command.get_results', return_value=cmd_mock):
filename = get_filename_for_content(self.context, "metadata", 1, '/data/master', 'remoteHost')
self.assertEquals(filename, None)
@patch('gppylib.operations.backup_utils.Command.run')
def test_get_filename_for_content_segment_bad_dir(self, mock1):
cmd_mock = Mock()
cmd_mock.rc = 0
cmd_mock.stdout = ''
cmd = Mock()
with patch('gppylib.operations.dump.Command.get_results', return_value=cmd_mock):
filename = get_filename_for_content(self.context, "metadata", 1, '/tmp', 'remoteHost')
self.assertEquals(filename, None)
def test_get_filename_for_content_segment_remote_dir_no_host(self):
with self.assertRaisesRegexp(Exception, 'Must supply name of remote host to check for metadata file'):
filename = get_filename_for_content(self.context, "metadata", 1, '/data/master')
@patch('gppylib.operations.backup_utils.Context.is_timestamp_in_old_format', return_value=True)
def test_convert_report_filename_to_cdatabase_filename_old_format(self, mock1):
report_file = '/data/master/db_dumps/20160101/gp_dump_20160101010101.rpt'
expected_output = '/data/master/db_dumps/20160101/gp_cdatabase_1_1_20160101010101'
cdatabase_file = convert_report_filename_to_cdatabase_filename(self.context, report_file)
self.assertEquals(expected_output, cdatabase_file)
@patch('gppylib.operations.backup_utils.Context.is_timestamp_in_old_format', return_value=False)
def test_convert_report_filename_to_cdatabase_filename_new_format(self, mock1):
report_file = '/data/master/db_dumps/20160101/gp_dump_20160101010101.rpt'
expected_output = '/data/master/db_dumps/20160101/gp_cdatabase_-1_1_20160101010101'
cdatabase_file = convert_report_filename_to_cdatabase_filename(self.context, report_file)
self.assertEquals(expected_output, cdatabase_file)
@patch('gppylib.operations.backup_utils.Context.is_timestamp_in_old_format', return_value=False)
def test_convert_report_filename_to_cdatabase_filename_empty_file(self, mock1):
report_file = '/data/master/db_dumps/20160101/gp_dump_20160101010101.rpt'
expected_output = '/data/master/db_dumps/20160101/gp_cdatabase_-1_1_20160101010101'
cdatabase_file = convert_report_filename_to_cdatabase_filename(self.context, report_file)
self.assertEquals(expected_output, cdatabase_file)
def test_convert_report_filename_to_cdatabase_filename_no_report_file(self):
report_file = '/data/master/db_dumps/20160101/gp_dump_20160101010101.rpt'
expected_output = '/data/master/db_dumps/20160101/gp_cdatabase_-1_1_20160101010101'
with self.assertRaisesRegexp(Exception, "Unable to locate report file for timestamp"):
cdatabase_file = convert_report_filename_to_cdatabase_filename(self.context, report_file)
@patch('gppylib.operations.backup_utils.Context.is_timestamp_in_old_format', return_value=True)
def test_convert_report_filename_to_cdatabase_filename_with_prefix_old_format(self, mock):
report_file = '/data/master/db_dumps/20160101/bar_gp_dump_20160101010101.rpt'
expected_output = '/data/master/db_dumps/20160101/bar_gp_cdatabase_1_1_20160101010101'
self.context.dump_prefix = 'bar_'
cdatabase_file = convert_report_filename_to_cdatabase_filename(self.context, report_file)
self.assertEquals(expected_output, cdatabase_file)
@patch('gppylib.operations.backup_utils.Context.is_timestamp_in_old_format', return_value=False)
def test_convert_report_filename_to_cdatabase_filename_with_prefix_new_format(self, mock):
report_file = '/data/master/db_dumps/20160101/bar_gp_dump_20160101010101.rpt'
expected_output = '/data/master/db_dumps/20160101/bar_gp_cdatabase_-1_1_20160101010101'
self.context.dump_prefix = 'bar_'
cdatabase_file = convert_report_filename_to_cdatabase_filename(self.context, report_file)
self.assertEquals(expected_output, cdatabase_file)
@patch('gppylib.operations.backup_utils.Context.is_timestamp_in_old_format', return_value=False)
def test_convert_report_filename_to_cdatabase_filename_with_prefix_empty_file(self, mock):
report_file = '/data/master/db_dumps/20160101/bar_gp_dump_20160101010101.rpt'
expected_output = '/data/master/db_dumps/20160101/bar_gp_cdatabase_-1_1_20160101010101'
self.context.dump_prefix = 'bar_'
cdatabase_file = convert_report_filename_to_cdatabase_filename(self.context, report_file)
self.assertEquals(expected_output, cdatabase_file)
@patch('gppylib.operations.backup_utils.Context.is_timestamp_in_old_format', return_value=False)
def test_convert_report_filename_to_cdatabase_filename_ddboost_with_earlier_date(self, mock):
# use the date from the file to calculate the directory, not the current date
report_file = '/data/master/db_dumps/20080101/gp_dump_20080101010101.rpt'
expected_output = '/db_dumps/20080101/gp_cdatabase_-1_1_20080101010101' #path in data domain
self.context.ddboost = True
cdatabase_file = convert_report_filename_to_cdatabase_filename(self.context, report_file)
self.assertEquals(expected_output, cdatabase_file)
def test_convert_report_filename_to_cdatabase_filename_with_prefix_no_report_file(self):
report_file = '/data/master/db_dumps/20160101/bar_gp_dump_20160101010101.rpt'
expected_output = '/data/master/db_dumps/20160101/bar_gp_cdatabase_-1_1_20160101010101'
self.context.dump_prefix = 'bar_'
with self.assertRaisesRegexp(Exception, "Unable to locate report file for timestamp"):
cdatabase_file = convert_report_filename_to_cdatabase_filename(self.context, report_file)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['--', '-- Database creation', '--', '', "CREATE DATABASE bkdb WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = dcddev;"])
@patch('gppylib.operations.backup_utils.Context.is_timestamp_in_old_format', return_value=False)
def test_check_cdatabase_exists_default(self, mock1, mock2):
self.context.target_db = 'bkdb'
report_file = '/data/master/db_dumps/20160101/gp_dump_20160101010101.rpt'
result = check_cdatabase_exists(self.context, report_file)
self.assertTrue(result)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['--', '-- Database creation', '--', '', "CREATE DATABASE fullbkdb WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = dcddev;"])
def test_check_cdatabase_exists_bad_dbname(self, mock):
self.context.target_db = 'bkdb'
report_file = '/data/master/db_dumps/20160101/gp_dump_20160101010101.rpt'
result = check_cdatabase_exists(self.context, report_file)
self.assertFalse(result)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['--', '-- Database creation', '--', '', "CREATE bkdb WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = dcddev;"])
def test_check_cdatabase_exists_no_database(self, mock):
self.context.target_db = 'bkdb'
report_file = '/data/master/db_dumps/20160101/gp_dump_20160101010101.rpt'
result = check_cdatabase_exists(self.context, report_file)
self.assertFalse(result)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=[])
def test_check_cdatabase_exists_empty_file(self, mock):
self.context.target_db = 'bkdb'
report_file = '/data/master/db_dumps/20160101/gp_dump_20160101010101.rpt'
result = check_cdatabase_exists(self.context, report_file)
self.assertFalse(result)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['--', '-- Database creation', '--', '', 'CREATE DATABASE'])
def test_check_cdatabase_exists_no_dbname(self, mock):
self.context.target_db = 'bkdb'
report_file = '/data/master/db_dumps/20160101/gp_dump_20160101010101.rpt'
result = check_cdatabase_exists(self.context, report_file)
self.assertFalse(result)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.dump.Command.get_results', return_value=CommandResult(0, "CREATE DATABASE", "", True, False))
def test_check_cdatabase_exists_command_result(self, mock1, mock2):
self.context.target_db = 'bkdb'
report_file = '/data/master/db_dumps/20160101/gp_dump_20160101010101.rpt'
self.context.ddboost = True
result = check_cdatabase_exists(self.context, report_file)
self.assertFalse(result)
def test_get_backup_dir_default(self):
expected = '/data/master/db_dumps/20160101'
result = self.context.get_backup_dir()
self.assertTrue(result, expected)
def test_get_backup_dir_different_backup_dir(self):
self.context.backup_dir = '/tmp/foo'
expected = '/tmp/foo/db_dumps/20160101'
result = self.context.get_backup_dir()
self.assertTrue(result, expected)
def test_get_backup_dir_no_mdd(self):
self.context.master_datadir= None
with self.assertRaisesRegexp(Exception, 'Cannot locate backup directory with existing parameters'):
result = self.context.get_backup_dir()
def test_get_backup_dir_bad_timestamp(self):
timestamp = 'a0160101010101'
with self.assertRaisesRegexp(Exception, 'Invalid timestamp'):
result = self.context.get_backup_dir(timestamp)
def test_check_successful_dump_default(self):
successful_dump = check_successful_dump(['gp_dump utility finished successfully.'])
self.assertTrue(successful_dump)
def test_check_successful_dump_failure(self):
successful_dump = check_successful_dump(['gp_dump utility finished unsuccessfully.'])
self.assertFalse(successful_dump)
def test_check_successful_dump_no_result(self):
successful_dump = check_successful_dump([])
self.assertFalse(successful_dump)
def test_check_successful_dump_with_whitespace(self):
successful_dump = check_successful_dump(['gp_dump utility finished successfully.\n'])
self.assertTrue(successful_dump)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Full', 'Timestamp Key: 01234567891234', 'gp_dump utility finished successfully.'])
def test_get_full_ts_from_report_file_default(self, mock1, mock2):
expected_output = '01234567891234'
ts = get_full_ts_from_report_file(self.context, 'foo')
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=False)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Full', 'Timestamp Key: 01234567891234', 'gp_dump utility finished successfully.'])
def test_get_full_ts_from_report_file_no_database(self, mock1, mock2):
expected_output = None
ts = get_full_ts_from_report_file(self.context, 'foo')
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Full', 'Timestamp Key: 01234567891234567', 'gp_dump utility finished successfully.'])
def test_get_full_ts_from_report_file_timestamp_too_long(self, mock1, mock2):
with self.assertRaisesRegexp(Exception, 'Invalid timestamp value found in report_file'):
get_full_ts_from_report_file(self.context, 'foo')
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Full', 'Timestamp Key: xxx34567891234', 'gp_dump utility finished successfully.'])
def test_get_full_ts_from_report_file_bad_timestamp(self, mock1, mock2):
with self.assertRaisesRegexp(Exception, 'Invalid timestamp value found in report_file'):
get_full_ts_from_report_file(self.context, 'foo')
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Full'])
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
def test_get_full_ts_from_report_file_missing_output(self, mock1, mock2):
expected_output = None
ts = get_full_ts_from_report_file(self.context, 'foo')
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Full', 'gp_dump utility finished successfully.'])
def test_get_full_ts_from_report_file_missing_timestamp(self, mock1, mock2):
expected_output = None
ts = get_full_ts_from_report_file(self.context, 'foo')
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Full', 'Timestamp Key: xxx34567891234', 'gp_dump utility finished successfully.'])
def test_get_full_ts_from_report_file_with_ddboost_bad_ts(self, mock1, mock2):
self.context.ddboost = True
with self.assertRaisesRegexp(Exception, 'Invalid timestamp value found in report_file'):
ts = get_full_ts_from_report_file(self.context, 'foo')
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Full', 'Timestamp Key: 01234567891234', 'gp_dump utility finished successfully.'])
def test_get_full_ts_from_report_file_with_ddboost_good_ts(self, mock1, mock2):
expected_output = '01234567891234'
self.context.ddboost = True
ts = get_full_ts_from_report_file(self.context, 'foo')
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Incremental', 'Timestamp Key: 01234567891234', 'gp_dump utility finished successfully.'])
def test_get_incremental_ts_from_report_file_default(self, mock1, mock2):
expected_output = '01234567891234'
ts = get_incremental_ts_from_report_file(self.context, 'foo')
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Incremental'])
def test_get_incremental_ts_from_report_file_missing_output(self, mock1, mock2):
expected_output = None
ts = get_incremental_ts_from_report_file(self.context, 'foo')
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Incremental', 'gp_dump utility finished successfully.'])
def test_get_incremental_ts_from_report_file_success(self, mock1, mock2):
expected_output = None
ts = get_incremental_ts_from_report_file(self.context, 'foo')
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Full', 'Timestamp Key: 01234567891234', 'gp_dump utility finished successfully.'])
def test_get_incremental_ts_from_report_file_full(self, mock1, mock2):
expected_output = None
ts = get_incremental_ts_from_report_file(self.context, 'foo')
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=False)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Incremental', 'Timestamp Key: 01234567891234', 'gp_dump utility finished successfully.'])
def test_get_incremental_ts_from_report_file_no_database(self, mock1, mock2):
expected_output = None
ts = get_incremental_ts_from_report_file(self.context, 'foo')
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Incremental', 'Timestamp Key: 01234567891234567', 'gp_dump utility finished successfully.'])
def test_get_incremental_ts_from_report_file_timestamp_too_long(self, mock1, mock2):
with self.assertRaisesRegexp(Exception, 'Invalid timestamp value found in report_file'):
get_incremental_ts_from_report_file(self.context, 'foo')
@patch('gppylib.operations.backup_utils.check_cdatabase_exists', return_value=True)
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Backup Type: Incremental', 'Timestamp Key: xxx34567891234', 'gp_dump utility finished successfully.'])
def test_get_incremental_ts_from_report_file_bad_timestamp(self, mock1, mock2):
with self.assertRaisesRegexp(Exception, 'Invalid timestamp value found in report_file'):
get_incremental_ts_from_report_file(self.context, 'foo')
def test_check_backup_type_full(self):
backup_type = check_backup_type(['Backup Type: Full'], 'Full')
self.assertEqual(backup_type, True)
def test_check_backup_type_mismatch(self):
backup_type = check_backup_type(['Backup Type: Incremental'], 'Full')
self.assertEqual(backup_type, False)
def test_check_backup_type_invalid_type(self):
backup_type = check_backup_type(['foo'], 'Full')
self.assertEqual(backup_type, False)
def test_check_backup_type_type_too_long(self):
backup_type = check_backup_type(['Backup Type: FullQ'], 'Full')
self.assertEqual(backup_type, False)
def test_get_timestamp_val_default(self):
ts_key = get_timestamp_val(['Timestamp Key: 01234567891234'])
self.assertEqual(ts_key, '01234567891234')
def test_get_timestamp_val_timestamp_too_short(self):
ts_key = get_timestamp_val(['Time: 00000'])
self.assertEqual(ts_key, None)
def test_get_timestamp_val_bad_timestamp(self):
with self.assertRaisesRegexp(Exception, 'Invalid timestamp value found in report_file'):
get_timestamp_val(['Timestamp Key: '])
@patch('os.path.isdir', return_value=True)
@patch('os.listdir', return_value=['20161212'])
def test_get_dump_dirs_single(self, mock, mock1):
self.context.backup_dir = '/tmp'
expected_output = ['/tmp/db_dumps/20161212']
ddir = self.context.get_dump_dirs()
self.assertEqual(ddir, expected_output)
@patch('os.path.isdir', return_value=True)
@patch('os.listdir', return_value=['20161212', '20161213', '20161214'])
def test_get_dump_dirs_multiple(self, mock, mock1):
self.context.backup_dir = '/tmp'
expected_output = ['20161212', '20161213', '20161214']
ddir = self.context.get_dump_dirs()
self.assertEqual(ddir.sort(), expected_output.sort())
@patch('os.path.isdir', return_value=True)
@patch('os.listdir', return_value=[])
def test_get_dump_dirs_empty(self, mock, mock2):
self.context.backup_dir = '/tmp'
self.assertEquals([], self.context.get_dump_dirs())
@patch('os.path.isdir', return_value=True)
@patch('os.listdir', return_value=['2016120a', '201612121', 'abcde'])
def test_get_dump_dirs_bad_dirs(self, mock, mock2):
self.context.backup_dir = '/tmp'
self.assertEquals([], self.context.get_dump_dirs())
@patch('os.listdir', return_value=['11111111', '20161201']) # Second file shouldn't be picked up, pretend it's a file
@patch('os.path.isdir', side_effect=[True, True, False]) # First value verifies dump dir exists, second and third are for the respective date dirs above
def test_get_dump_dirs_file_not_dir(self, mock, mock2):
self.context.backup_dir = '/tmp'
expected_output = ['/tmp/db_dumps/11111111']
ddir = self.context.get_dump_dirs()
self.assertEqual(ddir, expected_output)
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['20161212', '20161213', '20161214'])
@patch('os.listdir', return_value=['gp_cdatabase_-1_1_20161212111111', 'gp_dump_20161212000000.rpt', 'gp_cdatabase_-1_1_20161212000001'])
@patch('gppylib.operations.backup_utils.get_full_ts_from_report_file', return_value=['000000'])
def test_get_latest_full_dump_timestamp_default(self, mock1, mock2, mock3):
expected_output = ['000000']
ts = get_latest_full_dump_timestamp(self.context)
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=[])
def test_get_latest_full_dump_timestamp_no_full(self, mock1):
with self.assertRaisesRegexp(Exception, 'No full backup found for incremental'):
get_latest_full_dump_timestamp(self.context)
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['20161212', '20161213', '20161214'])
@patch('os.listdir', return_value=['gp_cdatabase_-1_1_2016121211111', 'gp_cdatabase_-1_1_201612120000010', 'gp_cdatabase_-1_1_2016121a111111'])
def test_get_latest_full_dump_timestamp_bad_timestamp(self, mock1, mock2):
with self.assertRaisesRegexp(Exception, 'No full backup found for incremental'):
ts = get_latest_full_dump_timestamp(self.context)
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['20161212', '20161213', '20161214'])
@patch('os.listdir', return_value=['gp_cdatabase_-1_1_20161212111111', 'gp_dump_20161212000000.rpt.bk', 'gp_cdatabase_-1_1_20161212000001'])
def test_get_latest_full_dump_timestamp_no_report_file(self, mock1, mock2):
with self.assertRaisesRegexp(Exception, 'No full backup found for incremental'):
ts = get_latest_full_dump_timestamp(self.context)
def test_generate_filename_with_ddboost(self):
expected_output = '/data/master/backup/DCA-35/20160101/gp_dump_20160101010101_last_operation'
self.context.ddboost = True
self.context.dump_dir = 'backup/DCA-35'
output = self.context.generate_filename("last_operation")
self.assertEquals(output, expected_output)
def test_generate_filename_with_env_mdd(self):
timestamp = '20160101010101'
expected_output = '%s/db_dumps/20160101/gp_dump_20160101010101_ao_state_file' % self.context.master_datadir
output = self.context.generate_filename("ao")
self.assertEqual(output, expected_output)
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['20160930'])
@patch('gppylib.operations.backup_utils.get_latest_report_in_dir', return_value='20160930093000')
def test_get_latest_report_timestamp_default(self, mock1, mock2):
self.context.backup_dir = '/foo'
result = get_latest_report_timestamp(self.context)
self.assertEquals(result, '20160930093000')
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=[])
@patch('gppylib.operations.backup_utils.get_latest_report_in_dir', return_value=[])
def test_get_latest_report_timestamp_no_dirs(self, mock1, mock2):
self.context.backup_dir = '/foo'
result = get_latest_report_timestamp(self.context)
self.assertEquals(result, None)
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['20160930'])
@patch('gppylib.operations.backup_utils.get_latest_report_in_dir', return_value=None)
def test_get_latest_report_timestamp_no_report_file(self, mock1, mock2):
self.context.backup_dir = '/foo'
result = get_latest_report_timestamp(self.context)
self.assertEquals(result, None)
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['20160930', '20160929'])
@patch('gppylib.operations.backup_utils.get_latest_report_in_dir', side_effect=[None, '20160929093000'])
def test_get_latest_report_timestamp_multiple_dirs(self, mock1, mock2):
self.context.backup_dir = '/foo'
result = get_latest_report_timestamp(self.context)
self.assertEquals(result, '20160929093000')
@patch('os.listdir', return_value=[])
def test_get_latest_report_in_dir_no_dirs(self, mock1):
bdir = '/foo'
result = get_latest_report_in_dir(bdir, self.context.dump_prefix)
self.assertEquals(result, None)
@patch('os.listdir', return_value=['gp_dump_20130125140013.rpt', 'gp_dump_20160125140013.FOO'])
def test_get_latest_report_in_dir_bad_extension(self, mock1):
bdir = '/foo'
result = get_latest_report_in_dir(bdir, self.context.dump_prefix)
self.assertEquals(result, '20130125140013')
@patch('os.listdir', return_value=['gp_dump_20130125140013.rpt', 'gp_dump_20160125140013.rpt'])
def test_get_latest_report_in_dir_different_years(self, mock1):
bdir = '/foo'
result = get_latest_report_in_dir(bdir, self.context.dump_prefix)
self.assertEquals(result, '20160125140013')
@patch('os.listdir', return_value=['gp_dump_20160125140013.rpt', 'gp_dump_20130125140013.rpt'])
def test_get_latest_report_in_dir_different_years_different_order(self, mock1):
bdir = '/foo'
result = get_latest_report_in_dir(bdir, self.context.dump_prefix)
self.assertEquals(result, '20160125140013')
def test_create_temp_file_with_tables_default(self):
dirty_tables = ['public.t1', 'public.t2', 'testschema.t3']
dirty_file = create_temp_file_with_tables(dirty_tables)
self.assertTrue(os.path.basename(dirty_file).startswith('table_list'))
self.assertTrue(os.path.exists(dirty_file))
content = get_lines_from_file(dirty_file)
self.assertEqual(dirty_tables, content)
os.remove(dirty_file)
def test_create_temp_file_with_tables_no_tables(self):
dirty_tables = ['']
dirty_file = create_temp_file_with_tables(dirty_tables)
self.assertTrue(os.path.basename(dirty_file).startswith('table_list'))
self.assertTrue(os.path.exists(dirty_file))
content = get_lines_from_file(dirty_file)
self.assertEqual(dirty_tables, content)
os.remove(dirty_file)
def test_create_temp_file_from_list_nonstandard_name(self):
dirty_tables = ['public.t1', 'public.t2', 'testschema.t3']
dirty_file = create_temp_file_from_list(dirty_tables, 'dirty_hackup_list_')
self.assertTrue(os.path.basename(dirty_file).startswith('dirty_hackup_list'))
self.assertTrue(os.path.exists(dirty_file))
content = get_lines_from_file(dirty_file)
self.assertEqual(dirty_tables, content)
os.remove(dirty_file)
def test_create_temp_file_from_list_no_tables_different_name(self):
dirty_tables = ['']
dirty_file = create_temp_file_from_list(dirty_tables, 'dirty_hackup_list_')
self.assertTrue(os.path.basename(dirty_file).startswith('dirty_hackup_list'))
self.assertTrue(os.path.exists(dirty_file))
content = get_lines_from_file(dirty_file)
self.assertEqual(dirty_tables, content)
os.remove(dirty_file)
def test_get_timestamp_from_increments_filename_default(self):
fname = '/data/master/foo/db_dumps/20130207/gp_dump_20130207133000_increments'
ts = get_timestamp_from_increments_filename(fname, self.context.dump_prefix)
self.assertEquals(ts, '20130207133000')
def test_get_timestamp_from_increments_filename_bad_file(self):
fname = '/data/master/foo/db_dumps/20130207/gpdump_20130207133000_increments'
with self.assertRaisesRegexp(Exception, 'Invalid increments file'):
get_timestamp_from_increments_filename(fname, self.context.dump_prefix)
@patch('glob.glob', return_value=[])
def test_get_full_timestamp_for_incremental_no_backup(self, mock1):
self.context.backup_dir = 'home'
with self.assertRaisesRegexp(Exception, "Could not locate full backup associated with timestamp '20160101010101'. Either increments file or full backup is missing."):
get_full_timestamp_for_incremental(self.context)
@patch('glob.glob', return_value=['foo'])
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=[])
def test_get_full_timestamp_for_incremental_bad_files(self, mock1, mock2):
self.context.backup_dir = 'home'
with self.assertRaisesRegexp(Exception, "Could not locate full backup associated with timestamp '20160101010101'. Either increments file or full backup is missing."):
get_full_timestamp_for_incremental(self.context)
@patch('glob.glob', return_value=['/tmp/db_dumps/20130207/gp_dump_20130207093000_increments'])
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['20130207133001', '20130207133000'])
@patch('os.path.exists', return_value = True)
def test_get_full_timestamp_for_incremental_default(self, mock1, mock2, mock3):
self.context.timestamp = '20130207133000'
full_ts = get_full_timestamp_for_incremental(self.context)
self.assertEquals(full_ts, '20130207093000')
def test_check_funny_chars_in_names_exclamation_mark(self):
tablenames = ['hello! world', 'correct']
with self.assertRaisesRegexp(Exception, 'Name has an invalid character'):
check_funny_chars_in_names(tablenames)
def test_check_funny_chars_in_names_newline(self):
tablenames = ['hello\nworld', 'propertablename']
with self.assertRaisesRegexp(Exception, 'Name has an invalid character'):
check_funny_chars_in_names(tablenames)
def test_check_funny_chars_in_names_default(self):
tablenames = ['helloworld', 'propertablename']
check_funny_chars_in_names(tablenames) #should not raise an exception
def test_check_funny_chars_in_names_comma(self):
tablenames = ['hello, world', 'correct']
with self.assertRaisesRegexp(Exception, 'Name has an invalid character'):
check_funny_chars_in_names(tablenames)
def test_expand_partition_tables_do_nothing(self):
self.assertEqual(expand_partition_tables('foo', None), None)
@patch('gppylib.operations.backup_utils.dbconn.execSQL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
@patch('pygresql.pgdb.pgdbCursor.fetchall', return_value=[['public', 'tl1'], ['public', 'tl2']])
def test_expand_partition_tables_default(self, mock1, mock2, mock3):
self.context.target_db = 'foo'
restore_tables = ['public.t1', 'public.t2']
expected_output = ['public.tl1', 'public.tl2', 'public.t2']
result = expand_partition_tables(self.context, restore_tables)
self.assertEqual(result.sort(), expected_output.sort())
@patch('gppylib.operations.backup_utils.dbconn.execSQL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
@patch('pygresql.pgdb.pgdbCursor.fetchall', return_value=[])
def test_expand_partition_tables_no_change(self, mock1, mock2, mock3):
self.context.target_db = 'foo'
restore_tables = ['public.t1', 'public.t2']
expected_output = ['public.t1', 'public.t2']
result = expand_partition_tables(self.context, restore_tables)
self.assertEqual(result.sort(), expected_output.sort())
def test_populate_filter_tables_all_part_tables(self):
table = 'public.t1'
rows = [['public', 't1'], ['public', 't2'], ['public', 't3']]
non_partition_tables = []
partition_leaves = []
self.assertEqual(populate_filter_tables(table, rows, non_partition_tables, partition_leaves),
(([], ['public.t1', 'public.t2', 'public.t3'])))
def test_populate_filter_tables_no_part_tables(self):
table = 'public.t1'
rows = []
non_partition_tables = []
partition_leaves = []
self.assertEqual(populate_filter_tables(table, rows, non_partition_tables, partition_leaves),
((['public.t1'], [])))
@patch('gppylib.operations.backup_utils.expand_partition_tables', return_value=['public.t1_p1', 'public.t1_p2', 'public.t1_p3', 'public.t2', 'public.t3'])
def test_expand_partitions_and_populate_filter_file_part_tables(self, mock):
dbname = 'bkdb'
partition_list = ['public.t1', 'public.t2', 'public.t3']
file_prefix = 'include_dump_tables_file'
expected_output = ['public.t2', 'public.t3', 'public.t1', 'public.t1_p1', 'public.t1_p2', 'public.t1_p3']
result = expand_partitions_and_populate_filter_file(dbname, partition_list, file_prefix)
self.assertTrue(os.path.basename(result).startswith(file_prefix))
self.assertTrue(os.path.exists(result))
contents = get_lines_from_file(result)
self.assertEqual(contents.sort(), expected_output.sort())
os.remove(result)
@patch('gppylib.operations.backup_utils.expand_partition_tables', return_value=['public.t1', 'public.t2', 'public.t3'])
def test_expand_partitions_and_populate_filter_file_no_part_tables(self, mock):
dbname = 'bkdb'
partition_list = ['public.t1', 'public.t2', 'public.t3']
file_prefix = 'exclude_dump_tables_file'
result = expand_partitions_and_populate_filter_file(dbname, partition_list, file_prefix)
self.assertTrue(os.path.basename(result).startswith(file_prefix))
self.assertTrue(os.path.exists(result))
contents = get_lines_from_file(result)
self.assertEqual(contents.sort(), partition_list.sort())
os.remove(result)
@patch('gppylib.operations.backup_utils.expand_partition_tables', return_value=[])
def test_expand_partitions_and_populate_filter_file_no_tables(self, mock):
dbname = 'bkdb'
partition_list = ['part_table']
file_prefix = 'exclude_dump_tables_file'
result = expand_partitions_and_populate_filter_file(dbname, partition_list, file_prefix)
self.assertTrue(os.path.basename(result).startswith(file_prefix))
self.assertTrue(os.path.exists(result))
contents = get_lines_from_file(result)
self.assertEqual(contents.sort(), partition_list.sort())
os.remove(result)
def test_get_batch_from_list_default(self):
batch = 1000
length = 3033
expected = [(0,1000), (1000,2000), (2000,3000), (3000,4000)]
indices = get_batch_from_list(length, batch)
self.assertEqual(expected, indices)
def test_get_batch_from_list_one_job(self):
batch = 1000
length = 1
expected = [(0,1000)]
indices = get_batch_from_list(length, batch)
self.assertEqual(expected, indices)
def test_get_batch_from_list_matching_jobs(self):
batch = 1000
length = 1000
expected = [(0,1000)]
indices = get_batch_from_list(length, batch)
self.assertEqual(expected, indices)
def test_get_batch_from_list_no_jobs(self):
batch = 1000
length = 0
expected = []
indices = get_batch_from_list(length, batch)
self.assertEqual(expected, indices)
def test_get_batch_from_list_more_jobs(self):
batch = 1000
length = 2000
expected = [(0,1000), (1000,2000)]
indices = get_batch_from_list(length, batch)
self.assertEqual(expected, indices)
@patch('gppylib.operations.backup_utils.escape_string', side_effect=['public.ao_table', 'public.co_table'])
def test_list_to_quoted_string_default(self, mock1):
input = ['public.ao_table', 'public.co_table']
expected = "'public.ao_table', 'public.co_table'"
output = list_to_quoted_string(Mock(), input)
self.assertEqual(expected, output)
@patch('gppylib.operations.backup_utils.escape_string', side_effect=[' public.ao_table', 'public.co_table '])
def test_list_to_quoted_string_whitespace(self, mock1):
input = [' public.ao_table', 'public.co_table ']
expected = "' public.ao_table', 'public.co_table '"
output = list_to_quoted_string(Mock(), input)
self.assertEqual(expected, output)
@patch('gppylib.operations.backup_utils.escape_string', return_value='public.ao_table')
def test_list_to_quoted_string_one_table(self, mock1):
input = ['public.ao_table']
expected = "'public.ao_table'"
output = list_to_quoted_string(Mock(), input)
self.assertEqual(expected, output)
def test_list_to_quoted_string_no_tables(self):
input = []
expected = "''"
output = list_to_quoted_string(None, input)
self.assertEqual(expected, output)
def test_generate_filename_with_prefix(self):
self.context.dump_prefix = 'foo_'
expected_output = '/data/master/db_dumps/20160101/%sgp_dump_20160101010101.rpt' % self.context.dump_prefix
output = self.context.generate_filename("report")
self.assertEquals(output, expected_output)
def test_generate_filename_with_prefix_and_ddboost(self):
self.context.dump_prefix = 'foo_'
expected_output = '/data/master/backup/DCA-35/20160101/%sgp_dump_20160101010101.rpt' % self.context.dump_prefix
self.context.ddboost = True
self.context.dump_dir = 'backup/DCA-35'
output = self.context.generate_filename("report")
self.assertEquals(output, expected_output)
@patch('os.listdir', return_value=['bar_gp_dump_20160125140013.rpt', 'foo_gp_dump_20130125140013.rpt'])
def test_get_latest_report_in_dir_with_mixed_prefixes(self, mock1):
bdir = '/foo'
self.context.dump_prefix = 'foo_'
result = get_latest_report_in_dir(bdir, self.context.dump_prefix)
self.assertEquals(result, '20130125140013')
@patch('os.listdir', return_value=['gp_dump_20130125140013.rpt'])
def test_get_latest_report_in_dir_with_no_prefix(self, mock1):
bdir = '/foo'
self.context.dump_prefix = 'foo_'
result = get_latest_report_in_dir(bdir, self.context.dump_prefix)
self.assertEquals(result, None)
@patch('glob.glob', return_value=['/tmp/db_dumps/20130207/foo_gp_dump_20130207093000_increments'])
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['20130207133001', '20130207133000'])
@patch('os.path.exists', return_value = True)
def test_get_full_timestamp_for_incremental_with_prefix_default(self, mock1, mock2, mock3):
self.context.backup_dir = 'home'
self.context.dump_prefix = 'foo_'
self.context.timestamp = '20130207133000'
full_ts = get_full_timestamp_for_incremental(self.context)
self.assertEquals(full_ts, '20130207093000')
@patch('glob.glob', return_value=[])
def test_get_full_timestamp_for_incremental_with_prefix_no_files(self, mock1):
self.context.backup_dir = 'home'
self.context.dump_prefix = 'foo_'
with self.assertRaisesRegexp(Exception, "Could not locate full backup associated with timestamp '20160101010101'. Either increments file or full backup is missing."):
get_full_timestamp_for_incremental(self.context)
@patch('glob.glob', return_value=['foo'])
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=[])
def test_get_full_timestamp_for_incremental_with_prefix_bad_files(self, mock1, mock2):
self.context.backup_dir = 'home'
self.context.dump_prefix = 'foo_'
with self.assertRaisesRegexp(Exception, "Could not locate full backup associated with timestamp '20160101010101'. Either increments file or full backup is missing."):
get_full_timestamp_for_incremental(self.context)
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=['20161212', '20161213', '20161214'])
@patch('os.listdir', return_value=['foo_gp_cdatabase_-1_1_20161212111111', 'foo_gp_dump_20161212000000.rpt', 'foo_gp_cdatabase_-1_1_20161212000001'])
@patch('gppylib.operations.backup_utils.get_full_ts_from_report_file', return_value=['000000'])
def test_get_latest_full_dump_timestamp_with_prefix_multiple_files(self, mock1, mock2, mock3):
expected_output = ['000000']
self.context.dump_prefix = 'foo_'
ts = get_latest_full_dump_timestamp(self.context)
self.assertEqual(ts, expected_output)
@patch('gppylib.operations.backup_utils.Context.get_dump_dirs', return_value=[])
def test_get_latest_full_dump_timestamp_with_prefix_no_backup(self, mock1):
self.context.dump_prefix = 'foo_'
with self.assertRaisesRegexp(Exception, 'No full backup found for incremental'):
get_latest_full_dump_timestamp(self.context)
@patch('gppylib.operations.backup_utils.Command.run')
def test_backup_file_with_nbu_default(self, mock1):
backup_file_with_nbu(self.context, path=self.netbackup_filepath)
@patch('gppylib.operations.backup_utils.Command.run')
def test_backup_file_with_nbu_with_segment(self, mock1):
segment_hostname = "sdw"
backup_file_with_nbu(self.context, path=self.netbackup_filepath, hostname=segment_hostname)
@patch('gppylib.operations.backup_utils.Command.run', side_effect=Exception('Error backing up file to NetBackup'))
def test_backup_file_with_nbu_with_Error(self, mock1):
with self.assertRaisesRegexp(Exception, 'Error backing up file to NetBackup'):
backup_file_with_nbu(self.context, path=self.netbackup_filepath)
@patch('gppylib.operations.backup_utils.Command.run')
def test_backup_file_with_nbu_no_block_size(self, mock1):
self.context.netbackup_block_size = None
backup_file_with_nbu(self.context, path=self.netbackup_filepath)
@patch('gppylib.operations.backup_utils.Command.run', side_effect=Exception('Error backing up file to NetBackup'))
def test_backup_file_with_nbu_no_block_size_with_error(self, mock1):
self.context.netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Error backing up file to NetBackup'):
backup_file_with_nbu(self.context, path=self.netbackup_filepath)
@patch('gppylib.operations.backup_utils.Command.run')
def test_backup_file_with_nbu_with_keyword(self, mock1):
    # NOTE(review): netbackup_keyword is assigned but never passed to
    # backup_file_with_nbu, so this test currently exercises the same path
    # as the default case.  Presumably the keyword should be forwarded —
    # confirm against backup_file_with_nbu's signature.
    netbackup_keyword = "hello"
    backup_file_with_nbu(self.context, path=self.netbackup_filepath)
@patch('gppylib.operations.backup_utils.Command.run')
def test_backup_file_with_nbu_with_keyword_and_segment(self, mock1):
netbackup_keyword = "hello"
segment_hostname = "sdw"
backup_file_with_nbu(self.context, path=self.netbackup_filepath, hostname=segment_hostname)
@patch('gppylib.operations.backup_utils.Command.run')
def test_backup_file_with_nbu_no_block_size_with_keyword_and_segment(self, mock1):
self.context.netbackup_block_size = None
segment_hostname = "sdw"
netbackup_keyword = "hello"
backup_file_with_nbu(self.context, path=self.netbackup_filepath, hostname=segment_hostname)
@patch('gppylib.operations.backup_utils.Command.run')
def test_backup_file_with_nbu_no_block_size_with_keyword(self, mock1):
    """Keyword variant with netbackup_block_size unset.

    BUG FIX: this method was named test_backup_file_with_nbu_with_keyword_and_segment,
    duplicating the method defined earlier in the class; the later definition
    silently shadowed the first so that test never ran.  Renamed to describe
    its actual contents (no segment hostname, block size None).
    """
    self.context.netbackup_block_size = None
    # NOTE(review): netbackup_keyword is still not forwarded to
    # backup_file_with_nbu — confirm intended behavior.
    netbackup_keyword = "hello"
    backup_file_with_nbu(self.context, path=self.netbackup_filepath)
@patch('gppylib.operations.backup_utils.Command.run')
def test_restore_file_with_nbu_no_block_size_with_segment(self, mock1):
segment_hostname = "sdw"
self.context.netbackup_block_size = None
backup_file_with_nbu(self.context, path=self.netbackup_filepath, hostname=segment_hostname)
@patch('gppylib.operations.backup_utils.Command.run', side_effect=Exception('Error backing up file to NetBackup'))
def test_restore_file_with_nbu_no_block_size_with_segment_and_error(self, mock1):
segment_hostname = "sdw"
self.context.netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Error backing up file to NetBackup'):
restore_file_with_nbu(self.context, path=self.netbackup_filepath, hostname=segment_hostname)
@patch('gppylib.operations.backup_utils.Command.run')
def test_restore_file_with_nbu_big_block_size(self, mock1):
self.context.netbackup_block_size = 1024
restore_file_with_nbu(self.context, path=self.netbackup_filepath)
@patch('gppylib.operations.backup_utils.Command.run')
def test_restore_file_with_nbu_with_segment_and_big_block_size(self, mock1):
segment_hostname = "sdw"
self.context.netbackup_block_size = 2048
restore_file_with_nbu(self.context, path=self.netbackup_filepath, hostname=segment_hostname)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.dump.Command.get_results', return_value=CommandResult(0, "/tmp/db_dumps/foo", "", True, False))
def test_check_file_dumped_with_nbu_default(self, mock1, mock2):
self.assertTrue(check_file_dumped_with_nbu(self.context, path=self.netbackup_filepath))
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.dump.Command.get_results', return_value=CommandResult(0, "", "", True, False))
def test_check_file_dumped_with_nbu_no_return(self, mock1, mock2):
self.assertFalse(check_file_dumped_with_nbu(self.context, path=self.netbackup_filepath))
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.dump.Command.get_results', return_value=CommandResult(0, "/tmp/db_dumps/foo", "", True, False))
def test_check_file_dumped_with_nbu_with_segment(self, mock1, mock2):
hostname = "sdw"
self.assertTrue(check_file_dumped_with_nbu(self.context, path=self.netbackup_filepath, hostname=hostname))
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.dump.Command.get_results', return_value=CommandResult(0, "", "", True, False))
def test_check_file_dumped_with_nbu_with_segment_and_no_return(self, mock1, mock2):
hostname = "sdw"
self.assertFalse(check_file_dumped_with_nbu(self.context, path=self.netbackup_filepath, hostname=hostname))
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/tmp/gp_dump_20160701000000_increments\n", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['20160701000000', '20160715000000', '20160804000000'])
def test_get_full_timestamp_for_incremental_with_nbu_default(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
self.context.timestamp = '20160804000000'
expected_output = '20160701000000'
result = get_full_timestamp_for_incremental_with_nbu(self.context)
self.assertEquals(result, expected_output)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/tmp/gp_dump_20160701000000_increments\n", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['20160701000000', '20160715000000'])
def test_get_full_timestamp_for_incremental_with_nbu_no_full_timestamp(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
self.context.timestamp = '20160804000000'
expected_output = None
result = get_full_timestamp_for_incremental_with_nbu(self.context)
self.assertEquals(result, expected_output)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_lines_from_file')
def test_get_full_timestamp_for_incremental_with_nbu_empty_file(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
self.context.timestamp = '20160804000000'
expected_output = None
result = get_full_timestamp_for_incremental_with_nbu(self.context)
self.assertEquals(result, expected_output)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/tmp/gp_dump_20160701000000_increments\n/tmp/gp_dump_20160801000000_increments\n", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['20160701000000', '20160715000000'])
def test_get_full_timestamp_for_incremental_with_nbu_later_timestamp(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
self.context.timestamp = '20160804000000'
expected_output = None
result = get_full_timestamp_for_incremental_with_nbu(self.context)
self.assertEquals(result, expected_output)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/tmp/gp_dump_20160701000000_increments\n/tmp/gp_dump_20160801000000_increments\n", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['20160710000000', '20160720000000', '20160804000000'])
def test_get_full_timestamp_for_incremental_with_nbu_multiple_increments(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
self.context.timestamp = '20160804000000'
expected_output = '20160701000000'
result = get_full_timestamp_for_incremental_with_nbu(self.context)
self.assertEquals(result, expected_output)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/tmp/foo_gp_dump_20160701000000_increments\n/tmp/foo_gp_dump_20160801000000_increments\n", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['20160710000000', '20160720000000', '20160804000000'])
def test_get_full_timestamp_for_incremental_with_nbu_with_prefix(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
self.context.timestamp = '20160804000000'
self.context.dump_prefix = 'foo'
expected_output = '20160701000000'
result = get_full_timestamp_for_incremental_with_nbu(self.context)
self.assertEquals(result, expected_output)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/tmp/foo_gp_dump_20160701000000_increments\n/tmp/foo_gp_dump_20160801000000_increments\n", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['20160710000000', '20160720000000'])
def test_get_full_timestamp_for_incremental_with_nbu_no_matching_increment(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
self.context.timestamp = '20160804000000'
self.context.dump_prefix = 'foo'
expected_output = None
result = get_full_timestamp_for_incremental_with_nbu(self.context)
self.assertEquals(result, expected_output)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/data/master/gp_dump_20160701000000.rpt\n", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_full_ts_from_report_file', return_value='20160701000000')
def test_get_latest_full_ts_with_nbu_default(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
expected_output = '20160701000000'
result = get_latest_full_ts_with_nbu(self.context)
self.assertEquals(result, expected_output)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/data/master/gp_dump_20160701000000.rpt\n", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_full_ts_from_report_file', return_value=None)
def test_get_latest_full_ts_with_nbu_no_full(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
with self.assertRaisesRegexp(Exception, 'No full backup found for given incremental on the specified NetBackup server'):
get_latest_full_ts_with_nbu(self.context)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_full_ts_from_report_file', return_value=None)
def test_get_latest_full_ts_with_nbu_no_report_file(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
with self.assertRaisesRegexp(Exception, 'No full backup found for given incremental on the specified NetBackup server'):
get_latest_full_ts_with_nbu(self.context)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/tmp/gp_dump_20160701000000.rpt\n/tmp/gp_dump_20160720000000.rpt", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_full_ts_from_report_file', return_value=None)
def test_get_latest_full_ts_with_nbu_empty_report_file(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
with self.assertRaisesRegexp(Exception, 'No full backup found for given incremental on the specified NetBackup server'):
get_latest_full_ts_with_nbu(self.context)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/tmp/gp_dump_20160701000000.rpt\n/tmp/gp_dump_20160720000000.rpt", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_full_ts_from_report_file', return_value='20160701000000')
def test_get_latest_full_ts_with_nbu_multiple_report_files(self, mock1, mock2, mock3, mock4):
    """Full timestamp is resolved when NetBackup lists multiple report files.

    BUG FIX: this method was named test_get_latest_full_ts_with_nbu_default,
    duplicating the method defined earlier in the class and shadowing it so
    the earlier test never ran.  Renamed to reflect the multi-file fixture.
    """
    self.context.netbackup_block_size = 1024
    expected_output = '20160701000000'
    result = get_latest_full_ts_with_nbu(self.context)
    self.assertEquals(result, expected_output)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "/tmp/gp_dump_20160701000000.rpt\n", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_full_ts_from_report_file', return_value=None)
def test_get_latest_full_ts_with_nbu_with_prefix(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
self.context.dump_prefix = 'foo'
expected_output = None
with self.assertRaisesRegexp(Exception, 'No full backup found for given incremental on the specified NetBackup server'):
get_latest_full_ts_with_nbu(self.context)
@patch('gppylib.operations.backup_utils.Command.run')
@patch('gppylib.operations.backup_utils.Command.get_results', return_value=CommandResult(0, "No object matched the specified predicate\n", "", True, False))
@patch('gppylib.operations.backup_utils.restore_file_with_nbu')
@patch('gppylib.operations.backup_utils.get_full_ts_from_report_file', return_value=None)
def test_get_latest_full_ts_with_nbu_no_object(self, mock1, mock2, mock3, mock4):
self.context.netbackup_block_size = 1024
expected_output = None
output = get_latest_full_ts_with_nbu(self.context)
self.assertEquals(output, expected_output)
# Yes, this is hackish, but mocking os.environ.get doesn't work.
def test_init_context_with_no_mdd(self):
    """Context() must raise when MASTER_DATA_DIRECTORY is empty."""
    old_mdd = os.environ.get('MASTER_DATA_DIRECTORY')
    try:
        os.environ['MASTER_DATA_DIRECTORY'] = ""
        with self.assertRaisesRegexp(Exception, 'Environment Variable MASTER_DATA_DIRECTORY not set!'):
            context = Context()
    finally:
        # BUG FIX: if MASTER_DATA_DIRECTORY was not set before the test,
        # old_mdd is None and `os.environ[...] = None` raises TypeError,
        # masking the test result.  Restore the original state exactly:
        # delete the variable if it was absent, else put the value back.
        if old_mdd is None:
            os.environ.pop('MASTER_DATA_DIRECTORY', None)
        else:
            os.environ['MASTER_DATA_DIRECTORY'] = old_mdd
@patch('gppylib.operations.backup_utils.execSQL')
def test_execute_sql_with_conn(self, execSQL):
cursor = Mock()
cursor.fetchall.return_value = 'queryResults'
execSQL.return_value = cursor
query = "fake query"
conn = Mock()
self.assertEquals('queryResults', execute_sql_with_connection(query, conn))
execSQL.assert_called_with(conn, query)
def test__escapeDoubleQuoteInSQLString(self):
    """Embedded double quotes are doubled; single quotes pass through.
    With forceDoubleQuote left at its default, the escaped value is also
    wrapped in double quotes."""
    cases = [
        ('MYDATE', 'MYDATE'),
        ('MY"DATE', 'MY""DATE'),
        ("MY'DATE", "MY'DATE"),
        ('MY""DATE', 'MY""""DATE'),
    ]
    for raw, escaped in cases:
        # Escaping only, no surrounding quotes.
        self.assertEqual(escaped, escapeDoubleQuoteInSQLString(raw, False))
        # Default behavior additionally wraps the result in double quotes.
        self.assertEqual('"%s"' % escaped, escapeDoubleQuoteInSQLString(raw))
@patch('os.walk', return_value=[('path', ['dir1', 'dir2'], ['gp_dump_20160101010101.rpt', 'file2', 'gp_dump_20160101010102.rpt']),
('path2', ['dir3'], ['gp_dump_20160101010103.rpt']),
('path3', ['dir4', 'dir5'], ['file5', 'gp_dump_20160101010104.rpt'])])
def test_get_report_files_and_paths_default(self, mock):
expectedFiles = [('path','gp_dump_20160101010101.rpt'),
('path', 'gp_dump_20160101010102.rpt'),
('path2','gp_dump_20160101010103.rpt'),
('path3','gp_dump_20160101010104.rpt')]
reportFiles = self.context.get_report_files_and_paths("/tmp")
self.assertEqual(expectedFiles, reportFiles)
@patch('os.walk', return_value=[('path', ['dir1', 'dir2'], ['file1', 'file2']),
('path2', ['dir3'], ['file3']),])
def test_get_report_files_and_paths_no_report_files(self, mock):
with self.assertRaisesRegexp(Exception, "No report files located"):
self.context.get_report_files_and_paths("/dump_dir")
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Compression Program: gzip',
'segment 0 (dbid 2) Host host Port 5433 Database testdb BackupFile /gp_dump_0_2_20160101010101: Succeeded'])
def test_get_compress_and_dbname_from_report_file_normal_dbname_compression(self, mock1):
compress, dbname = self.context.get_compress_and_dbname_from_report_file("report_file_name")
self.assertTrue(compress)
self.assertEquals(dbname, 'testdb')
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Compression Program: gzip',
'segment 0 (dbid 2) Host host Port 5433 Database "test""db" BackupFile /gp_dump_0_2_20160101010101: Succeeded'])
def test_get_compress_and_dbname_from_report_file_special_dbname_compression(self, mock1):
compress, dbname = self.context.get_compress_and_dbname_from_report_file("report_file_name")
self.assertTrue(compress)
self.assertEquals(dbname, '"test""db"')
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Compression Program: None',
'segment 0 (dbid 2) Host host Port 5433 Database testdb BackupFile /gp_dump_0_2_20160101010101: Succeeded'])
def test_get_compress_and_dbname_from_report_file_normal_dbname_no_compression(self, mock1):
compress, dbname = self.context.get_compress_and_dbname_from_report_file("report_file_name")
self.assertFalse(compress)
self.assertEquals(dbname, 'testdb')
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Compression Program: None',
'segment 0 (dbid 2) Host host Port 5433 Database "test""db" BackupFile /gp_dump_0_2_20160101010101: Succeeded'])
def test_get_compress_and_dbname_from_report_file_special_dbname_no_compression(self, mock1):
compress, dbname = self.context.get_compress_and_dbname_from_report_file("report_file_name")
self.assertFalse(compress)
self.assertEquals(dbname, '"test""db"')
@patch('gppylib.operations.backup_utils.get_lines_from_file',
return_value=['segment 0 (dbid 2) Host host Port 5433 Database testdb BackupFile /gp_dump_0_2_20160101010101: Succeeded'])
def test_get_compress_and_dbname_from_report_file_no_compression_line_found(self, mock1):
with self.assertRaisesRegexp(Exception, "Could not determine database name and compression type from report file"):
self.context.get_compress_and_dbname_from_report_file("report_file_name")
@patch('gppylib.operations.backup_utils.get_lines_from_file', return_value=['Compression Program: gzip'])
def test_get_compress_and_dbname_from_report_file_no_dbname_line_found(self, mock1):
with self.assertRaisesRegexp(Exception, "Could not determine database name and compression type from report file"):
self.context.get_compress_and_dbname_from_report_file("report_file_name")
| 57.251471
| 208
| 0.743482
|
acfd88cbffd122a6b42a5ea0cd085f6d9bd1492f
| 210
|
py
|
Python
|
test/tests/test_dcp1/test_single_file_to_single_dir.py
|
sjtbham/mpifileutils
|
4ec784108d066abe5060ebb197c1dba83e88bd7d
|
[
"BSD-3-Clause"
] | 97
|
2016-10-10T20:25:27.000Z
|
2021-10-04T15:47:35.000Z
|
test/tests/test_dcp1/test_single_file_to_single_dir.py
|
sjtbham/mpifileutils
|
4ec784108d066abe5060ebb197c1dba83e88bd7d
|
[
"BSD-3-Clause"
] | 389
|
2016-10-10T17:44:05.000Z
|
2022-03-25T09:54:15.000Z
|
test/tests/test_dcp1/test_single_file_to_single_dir.py
|
sjtbham/mpifileutils
|
4ec784108d066abe5060ebb197c1dba83e88bd7d
|
[
"BSD-3-Clause"
] | 39
|
2016-11-21T18:35:42.000Z
|
2022-03-03T05:21:45.000Z
|
#!/usr/bin/env python2
from subprocess import call
def test_dcp1_single_file_to_single_dir():
    """Run the legacy dcp1 single-file-to-single-dir shell test.

    BUG FIX: the return code from call() was previously discarded, so this
    pytest test passed even when the shell script failed.  Assert a zero
    exit status so failures actually surface.
    """
    rc = call("~/mpifileutils/test/legacy/dcp1_tests/test_dcp1_single_file_to_single_dir/test.sh", shell=True)
    assert rc == 0, "dcp1 single-file-to-single-dir test script failed with rc=%d" % rc
| 35
| 114
| 0.790476
|
acfd89545e3581ea39c1c1699c03353453ef4279
| 4,375
|
py
|
Python
|
hw4/code/test_autoencoder_noisy.py
|
stegnerw/intelligent_systems
|
46f70dd598666d5236773b137a268075105281a8
|
[
"MIT"
] | null | null | null |
hw4/code/test_autoencoder_noisy.py
|
stegnerw/intelligent_systems
|
46f70dd598666d5236773b137a268075105281a8
|
[
"MIT"
] | 1
|
2021-02-27T13:32:26.000Z
|
2021-02-27T13:32:26.000Z
|
hw4/code/test_autoencoder_noisy.py
|
stegnerw/intelligent_systems
|
46f70dd598666d5236773b137a268075105281a8
|
[
"MIT"
] | null | null | null |
###############################################################################
# Imports
###############################################################################
# Custom imports
from autoencoder import Autoencoder
from settings import *
# External imports
import numpy as np
import pathlib
import matplotlib
import matplotlib.pyplot as plt
def splitClasses(data, labels):
    '''Split up dataset by class

    Parameters
    ----------
    data, labels : np.ndarray
        Arrays of data points and one-hot labels

    Returns
    -------
    list
        One list of data points per class, indexed by class number
    '''
    # One (initially empty) bucket per class.
    buckets = [[] for _ in range(CLASSES)]
    for point, label in zip(data, labels):
        # The hot index of the one-hot label selects the bucket.
        buckets[np.argmax(label)].append(point)
    return buckets
def getLossByClass(autoencoder, data, labels, expected):
    '''Compute the autoencoder reconstruction loss separately per class.

    Parameters
    ----------
    autoencoder : Autoencoder
        Autoencoder for use in loss calculation
    data, labels : np.ndarray
        Arrays of input points and their one-hot labels
    expected : np.ndarray
        Target points the reconstructions are evaluated against
        (the clean data when `data` is the noisy variant)

    Returns
    -------
    list
        Loss values by class, indexed by class number
    '''
    loss = list()
    split_data = splitClasses(data, labels)
    split_expected = splitClasses(expected, labels)
    # splitClasses preserves input order within each class, so the i-th
    # noisy point lines up with the i-th clean target.
    for i, (d, e) in enumerate(zip(split_data, split_expected)):
        print(f'Evaluating class {i}')
        loss.append(autoencoder.eval(d, e))
    return loss
def getSamplePoints(data, n):
    '''Get sample points from the given data set

    Parameters
    ----------
    data : np.ndarray
        Array of data points
    n : int
        Number of sample points

    Returns
    -------
    np.ndarray
        n distinct randomly chosen data points (without replacement)
    '''
    # BUG FIX: the sample size was hard-coded to 8, silently ignoring the
    # n parameter; use n as documented.  (Callers passing 8 are unaffected.)
    indices = np.random.choice(np.arange(len(data)), n, replace=False)
    return data[indices]
def drawSamples(autoencoder, data, num_samples, dir_name, title):
    '''Draw the output predictions and save them

    Saves each sampled original point and its autoencoder reconstruction
    as 28x28 grayscale PNGs (orig_<i>.png / pred_<i>.png) in dir_name.

    Parameters
    ----------
    autoencoder : Autoencoder
        Autoencoder for use in inference
    data : np.ndarray
        Array of data points
    num_samples : int
        Number of sample points
    dir_name : pathlib.Path
        Directory to save the images to (must support .joinpath)
    title : str
        Title of the plot
        NOTE(review): title is never used in the body — confirm whether a
        titled composite figure was intended here.
    '''
    sample_points = getSamplePoints(data, num_samples)
    for i, d in enumerate(sample_points):
        d_name = dir_name.joinpath(f'orig_{i}.png')
        # Points are stored flattened; images unflatten column-major ('F').
        matplotlib.image.imsave(str(d_name), d.reshape(28, 28, order='F'), cmap='Greys_r')
        pred = autoencoder.predict(d, one_hot=False)
        p_name = dir_name.joinpath(f'pred_{i}.png')
        matplotlib.image.imsave(str(p_name), pred.reshape(28, 28, order='F'), cmap='Greys_r')
# Seed for consistency
np.random.seed(SEED)
# Load best weights back up: every file but the last is a hidden layer,
# the last (sorted) weight file is the output layer.
autoencoder = Autoencoder(input_size=INPUTS)
weight_files = sorted(AUTO_NOISY_MODEL_DIR.iterdir())
for weight_file in weight_files[:-1]:
    autoencoder.addLayer(file_name=weight_file, output=False)
autoencoder.addLayer(file_name=weight_files[-1], output=True)
# Test on all data and draw samples.
# NOTE(review): noisy_test_data / test_data etc. presumably come from the
# `from settings import *` star-import — confirm in settings.py.
test_err = autoencoder.eval(noisy_test_data, test_data)
print(f'Test loss: {test_err:0.3f}')
sample_title = 'Autoencoder Sample Outputs'
drawSamples(autoencoder, noisy_test_data, 8, AUTO_NOISY_IMG_DIR, sample_title)
# Graph loss by class; index 0 is the overall loss, then one bar per class.
print('Testing train set')
train_loss = getLossByClass(autoencoder, noisy_train_data, train_labels, train_data)
train_loss = [autoencoder.eval(noisy_train_data, train_data)] + train_loss
print('Testing test set')
test_loss = getLossByClass(autoencoder, noisy_test_data, test_labels, test_data)
test_loss = [autoencoder.eval(noisy_test_data, test_data)] + test_loss
x = np.arange(len(train_loss))
labels = ['Overall'] + list(range(len(train_loss) - 1))
plt.figure()
# Side-by-side train/test bars centered on each class tick.
rect_width = 0.35
plt.bar(x-rect_width/2, train_loss, rect_width, label='Train')
plt.bar(x+rect_width/2, test_loss, rect_width, label='Test')
plt.title('Denoising Autoencoder Loss by Class')
plt.xlabel('Class')
plt.xticks(x, labels=labels)
plt.ylabel('Loss')
plt.grid(axis='y')
plt.gca().set_axisbelow(True)
plt.legend(loc='lower right')
plt.tight_layout()
plt.savefig(str(AUTO_NOISY_BAR), bbox_inches='tight', pad_inches=0)
# Persist the overall test loss for the report.
with open(str(AUTO_NOISY_TEST_LOSS), 'w') as loss_f:
    loss_f.write(f'{test_err:0.3f}')
| 33.143939
| 94
| 0.642743
|
acfd8aba19d1cc5b5a0f4bbe835815c1f6f45664
| 3,361
|
py
|
Python
|
src/cfnlint/formatters/__init__.py
|
obobrova/cfn-python-lint
|
42c0cd89577a39e903e5ef8a337926cc7ff6822c
|
[
"MIT-0"
] | null | null | null |
src/cfnlint/formatters/__init__.py
|
obobrova/cfn-python-lint
|
42c0cd89577a39e903e5ef8a337926cc7ff6822c
|
[
"MIT-0"
] | 1
|
2020-04-15T16:36:10.000Z
|
2020-04-15T16:36:10.000Z
|
src/cfnlint/formatters/__init__.py
|
obobrova/cfn-python-lint
|
42c0cd89577a39e903e5ef8a337926cc7ff6822c
|
[
"MIT-0"
] | 1
|
2020-01-05T01:05:55.000Z
|
2020-01-05T01:05:55.000Z
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import json
from cfnlint.rules import Match
class BaseFormatter(object):
    """Base Formatter class"""

    def _format(self, match):
        """Format the specific match"""

    def print_matches(self, matches):
        """Output all the matches"""
        if not matches:
            return None
        # One formatted match per line by default.
        return '\n'.join(self._format(found) for found in matches)
class Formatter(BaseFormatter):
    """Generic Formatter"""

    def _format(self, match):
        """Format output"""
        # "<rule id> <message>\n<file>:<line>:<col>\n"
        return u'{0} {1}\n{2}:{3}:{4}\n'.format(
            match.rule.id, match.message,
            match.filename, match.linenumber, match.columnnumber)
class JsonFormatter(BaseFormatter):
    """Json Formatter"""

    class CustomEncoder(json.JSONEncoder):
        """Custom Encoding for the Match Object"""
        # pylint: disable=E0202
        def default(self, o):
            if not isinstance(o, Match):
                # Unknown objects: tagged dump of their attribute dict.
                return {'__{}__'.format(o.__class__.__name__): o.__dict__}
            # Severity is encoded by the first letter of the rule id.
            prefix = o.rule.id[0]
            if prefix == 'W':
                level = 'Warning'
            elif prefix == 'I':
                level = 'Informational'
            else:
                level = 'Error'
            return {
                'Rule': {
                    'Id': o.rule.id,
                    'Description': o.rule.description,
                    'ShortDescription': o.rule.shortdesc,
                    'Source': o.rule.source_url
                },
                'Location': {
                    'Start': {
                        'ColumnNumber': o.columnnumber,
                        'LineNumber': o.linenumber,
                    },
                    'End': {
                        'ColumnNumber': o.columnnumberend,
                        'LineNumber': o.linenumberend,
                    },
                    'Path': getattr(o, 'path', None),
                },
                'Level': level,
                'Message': o.message,
                'Filename': o.filename,
            }

    def print_matches(self, matches):
        # JSON formatter outputs a single JSON object for all matches.
        return json.dumps(
            matches, indent=4, cls=self.CustomEncoder,
            sort_keys=True, separators=(',', ': '))
class QuietFormatter(BaseFormatter):
    """Quiet Formatter"""

    def _format(self, match):
        """Format output"""
        # "<rule> <file>:<line>" — note this uses the rule object itself,
        # not rule.id, matching the original behavior.
        return u'{0} {1}:{2}'.format(match.rule, match.filename, match.linenumber)
class ParseableFormatter(BaseFormatter):
    """Parseable Formatter"""

    def _format(self, match):
        """Format output"""
        # Colon-separated machine-readable record:
        # file:line:col:endline:endcol:ruleid:message
        fields = (
            match.filename,
            match.linenumber,
            match.columnnumber,
            match.linenumberend,
            match.columnnumberend,
            match.rule.id,
            match.message,
        )
        return u':'.join(u'{0}'.format(field) for field in fields)
| 28.483051
| 71
| 0.479917
|
acfd8b9451b6f9e95644104e088d6b47fd3131a3
| 3,042
|
py
|
Python
|
try.py
|
jlarrieux/CryptoPriceLambdaCommons
|
8b0cfb00c596125be49788f2d3567b78c4153dc7
|
[
"Apache-2.0"
] | null | null | null |
try.py
|
jlarrieux/CryptoPriceLambdaCommons
|
8b0cfb00c596125be49788f2d3567b78c4153dc7
|
[
"Apache-2.0"
] | null | null | null |
try.py
|
jlarrieux/CryptoPriceLambdaCommons
|
8b0cfb00c596125be49788f2d3567b78c4153dc7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import decimal
import aws_util
import indicator_util
import boto3
import crypto_price_lambda_commons_util
import csv
import pickle
from my_rolling_list import MyRollingList
from my_rolling_list import MyRollingList as MY
from six.moves import urllib
import json
decimal.getcontext().prec = 7
region = "us-east-1"
dynamodb = boto3.client('dynamodb', region_name=region)
parameter_key = '0'
table_name = 'eth-price-hourly-nosql-db'
s3_resource = boto3.resource('s3')
bucket = 'com.jlarrieux.lambda'
key = 'ethereum_daily_closing_prices.pkl'
def main() -> None:
    """Load the persisted rolling price list from S3 and report on it.

    Prints the number of stored prices followed by the 14 most recent
    entries.
    """
    rolling_prices = aws_util._load_from_s3(bucket=bucket, s3_key=key)
    print(rolling_prices.size())
    print(rolling_prices.get_most_recents(14))
def print_averages(my_rolling_list: MyRollingList, value: int) -> None:
    """Print the simple moving average of the last *value* entries."""
    recents = my_rolling_list.get_most_recents(value)
    average = indicator_util.calculate_simple_moving_average(recents)
    formatted = crypto_price_lambda_commons_util.format_money_to_string(average)
    print(f"The last {value} day averages were: {formatted}")
def load_and_process(
        csv_path: str = r"C:\Users\mrsea\Documents\Data\ETH-USD.csv") -> float:
    """Scan a daily OHLC price CSV and report the highest closing price.

    Improvements over the previous version: the hard-coded path is now a
    parameter (the old Windows path remains the default); the default is a
    raw string (the old literal contained invalid escape sequences such as
    ``\\m`` and ``\\D``); the running maximum no longer shadows the ``max``
    builtin; and the maximum is returned in addition to being printed.

    :param csv_path: path to a CSV file whose fifth column (index 4) is the
        closing price, with a single header row.
    :return: the maximum closing price found (0.0 when there are no data
        rows).
    """
    max_close = 0.0
    with open(csv_path) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        for line_count, row in enumerate(csv_reader):
            if line_count == 0:
                # Echo the header row, preserving the old behaviour.
                print(",\t\t ".join(row))
            else:
                val = float(row[4])
                if val > max_close:
                    max_close = val
    print(max_close)
    return max_close
def price(asset):
    """Fetch and print the metric data for *asset* from the remote API."""
    url = f"https://crypto.jlarrieux.com/metric/data?asset={asset}"
    response = urllib.request.urlopen(url)
    payload = json.loads(response.read().decode())
    print(payload)
if __name__ == '__main__':
main()
| 33.8
| 196
| 0.707101
|
acfd8d263d6ba73027264a6efe84c311a5edab2c
| 5,684
|
py
|
Python
|
storyboard/api/v1/validations.py
|
Sitcode-Zoograf/storyboard
|
5833f87e20722c524a1e4a0b8e1fb82206fb4e5c
|
[
"Apache-2.0"
] | null | null | null |
storyboard/api/v1/validations.py
|
Sitcode-Zoograf/storyboard
|
5833f87e20722c524a1e4a0b8e1fb82206fb4e5c
|
[
"Apache-2.0"
] | null | null | null |
storyboard/api/v1/validations.py
|
Sitcode-Zoograf/storyboard
|
5833f87e20722c524a1e4a0b8e1fb82206fb4e5c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from storyboard.db.models import CommonLength
# JSON schema validating the body of a user update (PUT) request.
# Length bounds come from the shared CommonLength constants.
USERS_PUT_SCHEMA = {
    "name": "user_schema",
    "type": "object",
    "properties": {
        "full_name": {
            "type": ["string"],
            "minLength": CommonLength.lower_middle_length,
            "maxLength": CommonLength.top_large_length
        },
        "email": {
            "type": ["string"],
            "minLength": CommonLength.lower_large_length,
            "maxLength": CommonLength.top_large_length
        },
        "openid": {
            "type": ["string", "null"],
            "maxLength": CommonLength.top_large_length
        }
    }
}
# POST (create) reuses the PUT schema but makes name/email mandatory.
USERS_POST_SCHEMA = copy.deepcopy(USERS_PUT_SCHEMA)
USERS_POST_SCHEMA["required"] = ["full_name", "email"]
# User preferences are free-form key/value pairs; keys must be 3-100
# characters long, values may be string/boolean/number/null.
USER_PREFERENCES_POST_SCHEMA = {
    "name": "userPreference_schema",
    "type": "object",
    "patternProperties": {
        "^.{3,100}$": {
            "type": ["string", "boolean", "number", "null"],
            "minLength": CommonLength.lower_short_length,
            "maxLength": CommonLength.top_large_length
        }
    },
    "additionalProperties": False
}
TEAMS_PUT_SCHEMA = {
"name": "team_schema",
"type": "object",
"properties": {
"name": {
"type": "string",
"minLength": CommonLength.lower_middle_length,
"maxLength": CommonLength.top_large_length
}
}
}
TEAMS_POST_SCHEMA = copy.deepcopy(TEAMS_PUT_SCHEMA)
TEAMS_POST_SCHEMA["required"] = ["name"]
"""permission_chema is not applied anywhere until permission controller
is implemented"""
PERMISSIONS_PUT_SCHEMA = {
"name": "permission_schema",
"type": "object",
"properties": {
"name": {
"type": "string",
"minLength": CommonLength.lower_middle_length,
"maxLength": CommonLength.top_short_length
},
"codename": {
"type": "string",
"maxLength": CommonLength.top_large_length
}
}
}
PERMISSIONS_POST_SCHEMA = copy.deepcopy(PERMISSIONS_PUT_SCHEMA)
PERMISSIONS_POST_SCHEMA["required"] = ["name", "codename"]
# JSON schema validating the body of a project update (PUT) request.
PROJECTS_PUT_SCHEMA = {
    "name": "project_schema",
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "minLength": CommonLength.lower_middle_length,
            "maxLength": CommonLength.top_middle_length
        },
        "repo_url": {
            "type": ["string", "null"],
            "maxLength": CommonLength.top_large_length
        }
    }
}
# POST (create) reuses the PUT schema but makes the name mandatory.
PROJECTS_POST_SCHEMA = copy.deepcopy(PROJECTS_PUT_SCHEMA)
PROJECTS_POST_SCHEMA["required"] = ["name"]
# JSON schema validating the body of a project-group update (PUT) request.
PROJECT_GROUPS_PUT_SCHEMA = {
    "name": "projectGroup_schema",
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "minLength": CommonLength.lower_middle_length,
            "maxLength": CommonLength.top_middle_length
        },
        "title": {
            "type": "string",
            "minLength": CommonLength.lower_middle_length,
            "maxLength": CommonLength.top_large_length
        }
    }
}
# POST (create) requires both name and title.
PROJECT_GROUPS_POST_SCHEMA = copy.deepcopy(PROJECT_GROUPS_PUT_SCHEMA)
PROJECT_GROUPS_POST_SCHEMA["required"] = ["name", "title"]
# JSON schema validating the body of a story update (PUT) request.
STORIES_PUT_SCHEMA = {
    "name": "story_schema",
    "type": "object",
    "properties": {
        "title": {
            "type": "string",
            "minLength": CommonLength.lower_large_length,
            "maxLength": CommonLength.top_large_length,
        }
    }
}
# POST (create) reuses the PUT schema but makes the title mandatory.
STORIES_POST_SCHEMA = copy.deepcopy(STORIES_PUT_SCHEMA)
STORIES_POST_SCHEMA["required"] = ["title"]
# JSON schema validating the body of a task update (PUT) request.
TASKS_PUT_SCHEMA = {
    "name": "task_schema",
    "type": "object",
    "properties": {
        "title": {
            "type": "string",
            "minLength": CommonLength.lower_middle_length,
            "maxLength": CommonLength.top_large_length
        }
    }
}
TASKS_POST_SCHEMA = copy.deepcopy(TASKS_PUT_SCHEMA)
TASKS_POST_SCHEMA["required"] = ["title"]
# JSON schema validating the body of a branch update (PUT) request.
BRANCHES_PUT_SCHEMA = {
    "name": "branch_schema",
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "minLength": CommonLength.lower_middle_length,
            "maxLength": CommonLength.top_middle_length
        }
    }
}
BRANCHES_POST_SCHEMA = copy.deepcopy(BRANCHES_PUT_SCHEMA)
BRANCHES_POST_SCHEMA["required"] = ["name"]
# JSON schema validating the body of a milestone update (PUT) request.
MILESTONES_PUT_SCHEMA = {
    "name": "milestone_schema",
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "minLength": CommonLength.lower_middle_length,
            "maxLength": CommonLength.top_middle_length
        }
    }
}
MILESTONES_POST_SCHEMA = copy.deepcopy(MILESTONES_PUT_SCHEMA)
MILESTONES_POST_SCHEMA["required"] = ["name"]
# JSON schema validating the body of a story-tag update (PUT) request.
STORY_TAGS_PUT_SCHEMA = {
    "name": "storyTag_schema",
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "minLength": CommonLength.lower_middle_length,
            "maxLength": CommonLength.top_short_length
        }
    }
}
STORY_TAGS_POST_SCHEMA = copy.deepcopy(STORY_TAGS_PUT_SCHEMA)
STORY_TAGS_POST_SCHEMA["required"] = ["name"]
| 27.326923
| 71
| 0.613476
|
acfd8d595924a541972df94e11a6b45f843311c8
| 64,824
|
py
|
Python
|
edb/server/server.py
|
Xen0byte/edgedb
|
9d8e020503793609547ffa0455e6c2fd8dff5a4d
|
[
"Apache-2.0"
] | null | null | null |
edb/server/server.py
|
Xen0byte/edgedb
|
9d8e020503793609547ffa0455e6c2fd8dff5a4d
|
[
"Apache-2.0"
] | null | null | null |
edb/server/server.py
|
Xen0byte/edgedb
|
9d8e020503793609547ffa0455e6c2fd8dff5a4d
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import asyncio
import binascii
import collections
import ipaddress
import json
import logging
import os
import pickle
import socket
import ssl
import stat
import struct
import sys
import time
import uuid
import immutables
from edb import errors
from edb.common import devmode
from edb.common import taskgroup
from edb.common import windowedsum
from edb.schema import reflection as s_refl
from edb.schema import roles as s_role
from edb.schema import schema as s_schema
from edb.server import args as srvargs
from edb.server import cache
from edb.server import config
from edb.server import connpool
from edb.server import compiler_pool
from edb.server import defines
from edb.server import protocol
from edb.server.ha import base as ha_base
from edb.server.ha import adaptive as adaptive_ha
from edb.server.protocol import binary # type: ignore
from edb.server import metrics
from edb.server import pgcon
from edb.server.pgcon import errors as pgcon_errors
from . import dbview
ADMIN_PLACEHOLDER = "<edgedb:admin>"
logger = logging.getLogger('edb.server')
log_metrics = logging.getLogger('edb.server.metrics')
class RoleDescriptor(TypedDict):
    """Shape of one role entry as cached in ``Server._roles``."""
    superuser: bool  # whether the role has superuser rights
    name: str  # role name as stored in the global schema
    password: str  # password value from the global schema
class StartupError(Exception):
    """Raised when the server cannot complete its startup sequence."""
    pass
class Server(ha_base.ClusterProtocol):
    """Core server object: owns the backend connection pool, client
    connections, and schema/config state loaded via introspection."""

    # Attributes below are populated during init()/_load_instance_data(),
    # not in __init__; the annotations document their eventual types.
    _sys_pgcon: Optional[pgcon.PGConnection]
    _roles: Mapping[str, RoleDescriptor]
    _instance_data: Mapping[str, str]
    _sys_queries: Mapping[str, str]
    _local_intro_query: bytes
    _global_intro_query: bytes
    _report_config_typedesc: bytes
    _report_config_data: bytes
    _std_schema: s_schema.Schema
    _refl_schema: s_schema.Schema
    _schema_class_layout: s_refl.SchemaTypeLayout
    _sys_pgcon_waiter: asyncio.Lock
    _servers: Mapping[str, asyncio.AbstractServer]
    _task_group: Optional[taskgroup.TaskGroup]
    _backend_adaptive_ha: Optional[adaptive_ha.AdaptiveHASupport]
    _testmode: bool
    # We maintain an OrderedDict of all active client connections.
    # We use an OrderedDict because it allows to move keys to either
    # end of the dict. That's used to keep all active client connections
    # grouped at the right end of the dict. The idea is that we can then
    # have a periodically run coroutine to GC all inactive connections.
    # This should be more economical than maintaining a TimerHandle for
    # every open connection. Also, this way, we can react to the
    # `session_idle_timeout` config setting changed mid-flight.
    _binary_conns: collections.OrderedDict[binary.EdgeConnection, bool]
    _idle_gc_handler: asyncio.TimerHandle | None = None
    _session_idle_timeout: int | None = None
    def __init__(
        self,
        *,
        cluster,
        runstate_dir,
        internal_runstate_dir,
        max_backend_connections,
        compiler_pool_size,
        nethosts,
        netport,
        testmode: bool = False,
        binary_endpoint_security: srvargs.ServerEndpointSecurityMode = (
            srvargs.ServerEndpointSecurityMode.Tls),
        http_endpoint_security: srvargs.ServerEndpointSecurityMode = (
            srvargs.ServerEndpointSecurityMode.Tls),
        auto_shutdown_after: float = -1,
        echo_runtime_info: bool = False,
        status_sinks: Sequence[Callable[[str], None]] = (),
        startup_script: Optional[srvargs.StartupScript] = None,
        backend_adaptive_ha: bool = False,
        default_auth_method: srvargs.ServerAuthMethod,
    ):
        """Set up server state; no I/O happens here beyond reading the
        cluster's runtime parameters.  Actual startup is done in init()."""
        self.__loop = asyncio.get_running_loop()
        self._config_settings = config.get_settings()
        # Used to tag PG notifications to later disambiguate them.
        self._server_id = str(uuid.uuid4())
        # Increase-only counter to reject outdated attempts to connect
        self._ha_master_serial = 0
        # Lifecycle flags; init()/serve paths flip these.
        self._serving = False
        self._initing = False
        self._accept_new_tasks = False
        self._cluster = cluster
        self._pg_addr = self._get_pgaddr()
        inst_params = cluster.get_runtime_params().instance_params
        self._tenant_id = inst_params.tenant_id
        # 1 connection is reserved for the system DB
        pool_capacity = max_backend_connections - 1
        self._pg_pool = connpool.Pool(
            connect=self._pg_connect,
            disconnect=self._pg_disconnect,
            max_capacity=pool_capacity,
        )
        self._pg_unavailable_msg = None
        # DB state will be initialized in init().
        self._dbindex = None
        self._runstate_dir = runstate_dir
        self._internal_runstate_dir = internal_runstate_dir
        self._max_backend_connections = max_backend_connections
        self._compiler_pool = None
        self._compiler_pool_size = compiler_pool_size
        # Clamp the advertised client pool size into the suggested range.
        self._suggested_client_pool_size = max(
            min(max_backend_connections,
                defines.MAX_SUGGESTED_CLIENT_POOL_SIZE),
            defines.MIN_SUGGESTED_CLIENT_POOL_SIZE
        )
        self._listen_hosts = nethosts
        self._listen_port = netport
        self._sys_auth: Tuple[Any, ...] = tuple()
        # Shutdown the server after the last management
        # connection has disconnected
        # and there have been no new connections for n seconds
        self._auto_shutdown_after = auto_shutdown_after
        self._auto_shutdown_handler = None
        self._echo_runtime_info = echo_runtime_info
        self._status_sinks = status_sinks
        self._startup_script = startup_script
        # Never use `self.__sys_pgcon` directly; get it via
        # `await self._acquire_sys_pgcon()`.
        self.__sys_pgcon = None
        self._roles = immutables.Map()
        self._instance_data = immutables.Map()
        self._sys_queries = immutables.Map()
        self._devmode = devmode.is_in_dev_mode()
        self._testmode = testmode
        self._binary_proto_id_counter = 0
        self._binary_conns = collections.OrderedDict()
        self._accepting_connections = False
        self._servers = {}
        # HTTP-protocol state: query cache and a rolling request counter.
        self._http_query_cache = cache.StatementsCache(
            maxsize=defines.HTTP_PORT_QUERY_CACHE_SIZE)
        self._http_last_minute_requests = windowedsum.WindowedSum()
        self._http_request_logger = None
        self._task_group = None
        self._stop_evt = asyncio.Event()
        # TLS material is configured later; see endpoint security modes.
        self._tls_cert_file = None
        self._tls_cert_newly_generated = False
        self._sslctx = None
        self._default_auth_method = default_auth_method
        self._binary_endpoint_security = binary_endpoint_security
        self._http_endpoint_security = http_endpoint_security
        if backend_adaptive_ha:
            self._backend_adaptive_ha = adaptive_ha.AdaptiveHASupport(self)
        else:
            self._backend_adaptive_ha = None
        self._idle_gc_handler = None
        self._session_idle_timeout = None
    async def _request_stats_logger(self):
        """Background task: log the per-minute HTTP request count every 30s,
        but only when the value changed since the last report."""
        last_seen = -1
        while True:
            current = int(self._http_last_minute_requests)
            if current != last_seen:
                log_metrics.info(
                    "HTTP requests in last minute: %d",
                    current,
                )
                last_seen = current
            await asyncio.sleep(30)
    def get_listen_hosts(self):
        """Return the configured listen hosts."""
        return self._listen_hosts
    def get_listen_port(self):
        """Return the configured listen port."""
        return self._listen_port
    def get_loop(self):
        """Return the asyncio event loop the server runs on."""
        return self.__loop
    def in_dev_mode(self):
        """True when running in development mode."""
        return self._devmode
    def in_test_mode(self):
        """True when running in test mode."""
        return self._testmode
    def get_pg_dbname(self, dbname: str) -> str:
        """Map an EdgeDB database name to its backend Postgres name."""
        return self._cluster.get_db_name(dbname)
    def on_binary_client_created(self) -> str:
        """Allocate a new binary-protocol client id; creating a client also
        cancels any pending auto-shutdown timer."""
        self._binary_proto_id_counter += 1
        if self._auto_shutdown_handler:
            self._auto_shutdown_handler.cancel()
            self._auto_shutdown_handler = None
        return str(self._binary_proto_id_counter)
    def on_binary_client_connected(self, conn):
        """Track a newly connected (not yet authenticated) client."""
        self._binary_conns[conn] = True
        metrics.current_client_connections.inc()
    def on_binary_client_authed(self, conn):
        """Record a successful client authentication."""
        self._report_connections(event='opened')
        metrics.total_client_connections.inc()
    def on_binary_client_after_idling(self, conn):
        """Move a client that just became active to the 'active' end of the
        ordered dict (see the _binary_conns comment on the class)."""
        try:
            self._binary_conns.move_to_end(conn, last=True)
        except KeyError:
            # Shouldn't happen, but just in case some weird async twist
            # gets us here we don't want to crash the connection with
            # this error.
            metrics.background_errors.inc(1.0, 'client_after_idling')
    def on_binary_client_disconnected(self, conn):
        """Drop a client; when no clients remain and auto-shutdown is
        enabled, arm the shutdown timer."""
        self._binary_conns.pop(conn, None)
        self._report_connections(event="closed")
        metrics.current_client_connections.dec()
        if not self._binary_conns and self._auto_shutdown_after >= 0:
            def shutdown():
                # Stop accepting connections and signal the serve loop.
                self._accepting_connections = False
                self._stop_evt.set()
            self._auto_shutdown_handler = self.__loop.call_later(
                self._auto_shutdown_after, shutdown)
    def _report_connections(self, *, event: str) -> None:
        """Log a connection open/close event with the current open count."""
        log_metrics.info(
            "%s a connection; open_count=%d",
            event,
            len(self._binary_conns),
        )
    async def _pg_connect(self, dbname):
        """Open a backend Postgres connection for *dbname* (pool callback).

        Snapshots the HA master serial before connecting; if the serial
        changed while connecting, the master switched and the fresh
        connection is discarded with a ConnectionError.
        """
        ha_serial = self._ha_master_serial
        pg_dbname = self.get_pg_dbname(dbname)
        started_at = time.monotonic()
        try:
            rv = await pgcon.connect(
                self._get_pgaddr(), pg_dbname, self._tenant_id)
        except Exception:
            metrics.backend_connection_establishment_errors.inc()
            raise
        finally:
            # Record connect latency for both success and failure.
            metrics.backend_connection_establishment_latency.observe(
                time.monotonic() - started_at)
        if ha_serial == self._ha_master_serial:
            rv.set_server(self)
            if self._backend_adaptive_ha is not None:
                self._backend_adaptive_ha.on_pgcon_made(
                    dbname == defines.EDGEDB_SYSTEM_DB
                )
            metrics.total_backend_connections.inc()
            metrics.current_backend_connections.inc()
            return rv
        else:
            rv.terminate()
            raise ConnectionError("connected to outdated Postgres master")
    async def _pg_disconnect(self, conn):
        """Close a backend connection (pool callback)."""
        metrics.current_backend_connections.dec()
        conn.terminate()
    async def init(self):
        """Perform server startup: connect to the system DB, load instance
        data, introspect schemas and configs, and resolve listen address
        and port defaults from the system config."""
        self._initing = True
        try:
            self.__sys_pgcon = await self._pg_connect(defines.EDGEDB_SYSTEM_DB)
            self._sys_pgcon_waiter = asyncio.Lock()
            self._sys_pgcon_ready_evt = asyncio.Event()
            self._sys_pgcon_reconnect_evt = asyncio.Event()
            await self._load_instance_data()
            global_schema = await self.introspect_global_schema()
            sys_config = await self.load_sys_config()
            await self.load_reported_config()
            self._dbindex = dbview.DatabaseIndex(
                self,
                std_schema=self._std_schema,
                global_schema=global_schema,
                sys_config=sys_config,
            )
            self._fetch_roles()
            await self._introspect_dbs()
            # Now, once all DBs have been introspected, start listening on
            # any notifications about schema/roles/etc changes.
            await self.__sys_pgcon.listen_for_sysevent()
            self.__sys_pgcon.mark_as_system_db()
            self._sys_pgcon_ready_evt.set()
            self._populate_sys_auth()
            # Fall back to system config (then hard defaults) for any
            # listen host/port not supplied on the command line.
            if not self._listen_hosts:
                self._listen_hosts = (
                    config.lookup('listen_addresses', sys_config)
                    or ('localhost',)
                )
            if self._listen_port is None:
                self._listen_port = (
                    config.lookup('listen_port', sys_config)
                    or defines.EDGEDB_PORT
                )
            self._reinit_idle_gc_collector()
        finally:
            self._initing = False
    def _reinit_idle_gc_collector(self) -> float:
        """(Re)arm the idle-client GC timer from `session_idle_timeout`.

        Returns the timeout in seconds; -1 when auto-shutdown is enabled
        (the GC is disabled in that mode), 0 or less disables the timer.
        """
        if self._auto_shutdown_after >= 0:
            return -1
        if self._idle_gc_handler is not None:
            self._idle_gc_handler.cancel()
            self._idle_gc_handler = None
        assert self._dbindex is not None
        session_idle_timeout = config.lookup(
            'session_idle_timeout', self._dbindex.get_sys_config())
        timeout = session_idle_timeout.to_microseconds()
        timeout /= 1_000_000.0  # convert to seconds
        if timeout > 0:
            self._idle_gc_handler = self.__loop.call_later(
                timeout, self._idle_gc_collector)
        return timeout
    def _idle_gc_collector(self):
        """Timer callback: close clients idle past the configured timeout,
        then re-arm the timer via _reinit_idle_gc_collector()."""
        try:
            self._idle_gc_handler = None
            idle_timeout = self._reinit_idle_gc_collector()
            if idle_timeout <= 0:
                return
            now = time.monotonic()
            expiry_time = now - idle_timeout
            for conn in self._binary_conns:
                try:
                    if conn.is_idle(expiry_time):
                        metrics.idle_client_connections.inc()
                        conn.close_for_idling()
                    elif conn.is_alive():
                        # We are sorting connections in
                        # 'on_binary_client_after_idling' to specifically
                        # enable this optimization. As soon as we find first
                        # non-idle active connection we're guaranteed
                        # to have traversed all of the potentially idling
                        # connections.
                        break
                except Exception:
                    metrics.background_errors.inc(1.0, 'close_for_idling')
                    conn.abort()
        except Exception:
            metrics.background_errors.inc(1.0, 'idle_clients_collector')
            raise
    async def _create_compiler_pool(self):
        """Start the compiler worker pool seeded with the loaded schemas."""
        self._compiler_pool = await compiler_pool.create_compiler_pool(
            pool_size=self._compiler_pool_size,
            dbindex=self._dbindex,
            runstate_dir=self._internal_runstate_dir,
            backend_runtime_params=self.get_backend_runtime_params(),
            std_schema=self._std_schema,
            refl_schema=self._refl_schema,
            schema_class_layout=self._schema_class_layout,
        )
    async def _destroy_compiler_pool(self):
        """Stop and drop the compiler pool, if one is running."""
        if self._compiler_pool is not None:
            await self._compiler_pool.stop()
            self._compiler_pool = None
    def _populate_sys_auth(self):
        """Cache auth rules from system config, sorted by priority."""
        cfg = self._dbindex.get_sys_config()
        auth = config.lookup('auth', cfg) or ()
        self._sys_auth = tuple(sorted(auth, key=lambda a: a.priority))
    def _get_pgaddr(self):
        """Return the backend cluster's connection spec."""
        return self._cluster.get_connection_spec()
    def get_compiler_pool(self):
        """Return the compiler pool (None until created)."""
        return self._compiler_pool
    def get_suggested_client_pool_size(self) -> int:
        """Return the client pool size advertised to clients."""
        return self._suggested_client_pool_size
    def get_db(self, *, dbname: str):
        """Return the registered database object; requires init() done."""
        assert self._dbindex is not None
        return self._dbindex.get_db(dbname)
    def maybe_get_db(self, *, dbname: str):
        """Like get_db() but tolerant of a missing database."""
        assert self._dbindex is not None
        return self._dbindex.maybe_get_db(dbname)
    def new_dbview(self, *, dbname, user, query_cache):
        """Create a new database view for a client session."""
        return self._dbindex.new_view(
            dbname, user=user, query_cache=query_cache)
    def remove_dbview(self, dbview):
        """Dispose of a client session's database view."""
        return self._dbindex.remove_view(dbview)
    def get_global_schema(self):
        """Return the current global schema."""
        return self._dbindex.get_global_schema()
    def get_compilation_system_config(self):
        """Return the system config subset used for compilation."""
        return self._dbindex.get_compilation_system_config()
    async def acquire_pgcon(self, dbname):
        """Acquire a healthy backend connection for *dbname* from the pool.

        Unhealthy connections are discarded and the acquire is retried up
        to max_capacity + 1 times; raises BackendUnavailableError when the
        backend is known to be down or no healthy connection is found.
        """
        if self._pg_unavailable_msg is not None:
            raise errors.BackendUnavailableError(
                'Postgres is not available: ' + self._pg_unavailable_msg
            )
        for _ in range(self._pg_pool.max_capacity + 1):
            conn = await self._pg_pool.acquire(dbname)
            if conn.is_healthy():
                return conn
            else:
                logger.warning('Acquired an unhealthy pgcon; discard now.')
                self._pg_pool.release(dbname, conn, discard=True)
        else:
            # This is unlikely to happen, but we defer to the caller to retry
            # when it does happen
            raise errors.BackendUnavailableError(
                'No healthy backend connection available at the moment, '
                'please try again.'
            )
    def release_pgcon(self, dbname, conn, *, discard=False):
        """Return a backend connection to the pool; unhealthy connections
        are force-discarded."""
        if not conn.is_healthy():
            if not discard:
                logger.warning('Released an unhealthy pgcon; discard now.')
            discard = True
        try:
            self._pg_pool.release(dbname, conn, discard=discard)
        except Exception:
            metrics.background_errors.inc(1.0, 'release_pgcon')
            raise
    async def load_sys_config(self):
        """Fetch and parse the system configuration from the system DB."""
        syscon = await self._acquire_sys_pgcon()
        try:
            query = self.get_sys_query('sysconfig')
            sys_config_json = await syscon.parse_execute_json(
                query,
                b'__backend_sysconfig',
                dbver=0,
                use_prep_stmt=True,
                args=(),
            )
        finally:
            self._release_sys_pgcon()
        return config.from_json(config.get_settings(), sys_config_json)
    async def reload_sys_config(self):
        """Reload the system config and re-arm the idle GC timer."""
        cfg = await self.load_sys_config()
        self._dbindex.update_sys_config(cfg)
        self._reinit_idle_gc_collector()
    def schedule_reported_config_if_needed(self, setting_name):
        """Refresh the reported-config blob when *setting_name* is one of
        the settings flagged for client reporting."""
        setting = self._config_settings[setting_name]
        if setting.report:
            self.create_task(
                self.load_reported_config(), interruptable=True)
    def get_report_config_data(self) -> bytes:
        """Return the pre-serialized reported-config payload."""
        return self._report_config_data
    async def load_reported_config(self):
        """Rebuild the reported-config payload: a length-prefixed type
        descriptor followed by the encoded config data."""
        syscon = await self._acquire_sys_pgcon()
        try:
            data = await syscon.parse_execute_extract_single_data_frame(
                self.get_sys_query('report_configs'),
                b'__report_configs',
                dbver=0, use_prep_stmt=True, args=(),
            )
            self._report_config_data = (
                struct.pack('!L', len(self._report_config_typedesc)) +
                self._report_config_typedesc +
                data
            )
        except Exception:
            metrics.background_errors.inc(1.0, 'load_reported_config')
            raise
        finally:
            self._release_sys_pgcon()
    async def introspect_global_schema(self, conn=None):
        """Introspect the global schema, using *conn* when given or the
        system pgcon otherwise, and parse it on top of the std schema."""
        if conn is not None:
            json_data = await conn.parse_execute_json(
                self._global_intro_query, b'__global_intro_db',
                dbver=0, use_prep_stmt=True, args=(),
            )
        else:
            syscon = await self._acquire_sys_pgcon()
            try:
                json_data = await syscon.parse_execute_json(
                    self._global_intro_query, b'__global_intro_db',
                    dbver=0, use_prep_stmt=True, args=(),
                )
            finally:
                self._release_sys_pgcon()
        return s_refl.parse_into(
            base_schema=self._std_schema,
            schema=s_schema.FlatSchema(),
            data=json_data,
            schema_class_layout=self._schema_class_layout,
        )
    async def _reintrospect_global_schema(self):
        """React to a global-schema-change notification: re-introspect and
        refresh the role cache; no-op while shutting down."""
        if not self._initing and not self._serving:
            logger.warning(
                "global-schema-changes event received during shutdown; "
                "ignoring."
            )
            return
        new_global_schema = await self.introspect_global_schema()
        self._dbindex.update_global_schema(new_global_schema)
        self._fetch_roles()
    async def introspect_user_schema(self, conn):
        """Introspect a database's user schema over *conn*, layered on the
        std schema and the current global schema."""
        json_data = await conn.parse_execute_json(
            self._local_intro_query, b'__local_intro_db',
            dbver=0, use_prep_stmt=True, args=(),
        )
        base_schema = s_schema.ChainedSchema(
            self._std_schema,
            s_schema.FlatSchema(),
            self.get_global_schema(),
        )
        return s_refl.parse_into(
            base_schema=base_schema,
            schema=s_schema.FlatSchema(),
            data=json_data,
            schema_class_layout=self._schema_class_layout,
        )
    async def introspect_db(self, dbname):
        """Use this method to (re-)introspect a DB.
        If the DB is already registered in self._dbindex, its
        schema, config, etc. would simply be updated. If it's missing
        an entry for it would be created.
        All remote notifications of remote events should use this method
        to refresh the state. Even if the remote event was a simple config
        change, a lot of other events could happen before it was sent to us
        by a remote server and us receiving it. E.g. a DB could have been
        dropped and recreated again. It's safer to refresh the entire state
        than refreshing individual components of it. Besides, DDL and
        database-level config modifications are supposed to be rare events.
        """
        try:
            conn = await self.acquire_pgcon(dbname)
        except pgcon_errors.BackendError as e:
            if e.code_is(pgcon_errors.ERROR_INVALID_CATALOG_NAME):
                # database does not exist (anymore)
                logger.warning(
                    "Detected concurrently-dropped database %s; skipping.",
                    dbname,
                )
                if self._dbindex is not None and self._dbindex.has_db(dbname):
                    self._dbindex.unregister_db(dbname)
                return
            else:
                raise
        try:
            user_schema = await self.introspect_user_schema(conn)
            # Cached reflection entries: eql_hash -> tuple of argnames.
            reflection_cache_json = await conn.parse_execute_json(
                b'''
                    SELECT json_agg(o.c)
                    FROM (
                        SELECT
                            json_build_object(
                                'eql_hash', t.eql_hash,
                                'argnames', array_to_json(t.argnames)
                            ) AS c
                        FROM
                            ROWS FROM(edgedb._get_cached_reflection())
                                AS t(eql_hash text, argnames text[])
                    ) AS o;
                ''',
                b'__reflection_cache',
                dbver=0,
                use_prep_stmt=True,
                args=(),
            )
            reflection_cache = immutables.Map({
                r['eql_hash']: tuple(r['argnames'])
                for r in json.loads(reflection_cache_json)
            })
            # Map of schema type id -> backend (Postgres) type id.
            backend_ids_json = await conn.parse_execute_json(
                b'''
                SELECT
                    json_object_agg(
                        "id"::text,
                        "backend_id"
                    )::text
                FROM
                    edgedb."_SchemaType"
                ''',
                b'__backend_ids_fetch',
                dbver=0,
                use_prep_stmt=True,
                args=(),
            )
            backend_ids = json.loads(backend_ids_json)
            db_config = await self.introspect_db_config(conn)
            assert self._dbindex is not None
            self._dbindex.register_db(
                dbname,
                user_schema=user_schema,
                db_config=db_config,
                reflection_cache=reflection_cache,
                backend_ids=backend_ids,
                refresh=True,
            )
        finally:
            self.release_pgcon(dbname, conn)
    async def introspect_db_config(self, conn):
        """Fetch and parse the per-database configuration over *conn*."""
        query = self.get_sys_query('dbconfig')
        result = await conn.parse_execute_json(
            query,
            b'__backend_dbconfig',
            dbver=0,
            use_prep_stmt=True,
            args=(),
        )
        return config.from_json(config.get_settings(), result)
    async def _introspect_dbs(self):
        """List all databases via the system pgcon, then introspect each
        of them concurrently."""
        syscon = await self._acquire_sys_pgcon()
        try:
            dbs_query = self.get_sys_query('listdbs')
            json_data = await syscon.parse_execute_json(
                dbs_query, b'__listdbs',
                dbver=0, use_prep_stmt=True, args=(),
            )
            dbnames = json.loads(json_data)
        finally:
            self._release_sys_pgcon()
        async with taskgroup.TaskGroup(name='introspect DBs') as g:
            for dbname in dbnames:
                # There's a risk of the DB being dropped by another server
                # between us building the list of databases and loading
                # information about them.
                g.create_task(self.introspect_db(dbname))
def _fetch_roles(self):
global_schema = self._dbindex.get_global_schema()
roles = {}
for role in global_schema.get_objects(type=s_role.Role):
role_name = str(role.get_name(global_schema))
roles[role_name] = {
'name': role_name,
'superuser': role.get_superuser(global_schema),
'password': role.get_password(global_schema),
}
self._roles = immutables.Map(roles)
async def _load_instance_data(self):
syscon = await self._acquire_sys_pgcon()
try:
result = await syscon.simple_query(b'''\
SELECT json FROM edgedbinstdata.instdata
WHERE key = 'instancedata';
''', ignore_data=False)
self._instance_data = immutables.Map(
json.loads(result[0][0].decode('utf-8')))
result = await syscon.simple_query(b'''\
SELECT json FROM edgedbinstdata.instdata
WHERE key = 'sysqueries';
''', ignore_data=False)
queries = json.loads(result[0][0].decode('utf-8'))
self._sys_queries = immutables.Map(
{k: q.encode() for k, q in queries.items()})
result = await syscon.simple_query(b'''\
SELECT text FROM edgedbinstdata.instdata
WHERE key = 'local_intro_query';
''', ignore_data=False)
self._local_intro_query = result[0][0]
result = await syscon.simple_query(b'''\
SELECT text FROM edgedbinstdata.instdata
WHERE key = 'global_intro_query';
''', ignore_data=False)
self._global_intro_query = result[0][0]
result = await syscon.simple_query(b'''\
SELECT bin FROM edgedbinstdata.instdata
WHERE key = 'stdschema';
''', ignore_data=False)
try:
data = binascii.a2b_hex(result[0][0][2:])
self._std_schema = pickle.loads(data)
except Exception as e:
raise RuntimeError(
'could not load std schema pickle') from e
result = await syscon.simple_query(b'''\
SELECT bin FROM edgedbinstdata.instdata
WHERE key = 'reflschema';
''', ignore_data=False)
try:
data = binascii.a2b_hex(result[0][0][2:])
self._refl_schema = pickle.loads(data)
except Exception as e:
raise RuntimeError(
'could not load refl schema pickle') from e
result = await syscon.simple_query(b'''\
SELECT bin FROM edgedbinstdata.instdata
WHERE key = 'classlayout';
''', ignore_data=False)
try:
data = binascii.a2b_hex(result[0][0][2:])
self._schema_class_layout = pickle.loads(data)
except Exception as e:
raise RuntimeError(
'could not load schema class layout pickle') from e
result = await syscon.simple_query(b'''\
SELECT bin FROM edgedbinstdata.instdata
WHERE key = 'report_configs_typedesc';
''', ignore_data=False)
try:
data = binascii.a2b_hex(result[0][0][2:])
assert data is not None
self._report_config_typedesc = data
except Exception as e:
raise RuntimeError(
'could not load report config typedesc') from e
finally:
self._release_sys_pgcon()
    def get_roles(self):
        """Return the cached role map built by _fetch_roles()."""
        return self._roles
async def _restart_servers_new_addr(self, nethosts, netport):
    """Rebind the TCP listeners to *nethosts*/*netport*.

    Servers already listening on a still-wanted host are kept; the rest
    are started or stopped as needed.  The admin Unix-socket server is
    replaced only when the port changes.
    """
    if not netport:
        raise RuntimeError('cannot restart without network port specified')
    nethosts = await _resolve_interfaces(nethosts)
    servers_to_stop = []
    servers = {}
    if self._listen_port == netport:
        # Same port: keep listeners whose host is still wanted and start
        # only the missing ones; the admin server stays untouched.
        hosts_to_start = [
            host for host in nethosts if host not in self._servers
        ]
        for host, srv in self._servers.items():
            if host == ADMIN_PLACEHOLDER or host in nethosts:
                servers[host] = srv
            else:
                servers_to_stop.append(srv)
        admin = False
    else:
        # Port changed: every listener (including admin) is replaced.
        hosts_to_start = nethosts
        servers_to_stop = self._servers.values()
        admin = True
    if hosts_to_start:
        new_servers, *_ = await self._start_servers(
            hosts_to_start, netport, admin
        )
        servers.update(new_servers)
    self._servers = servers
    self._listen_hosts = nethosts
    self._listen_port = netport

    # Build a human-readable description of the addresses being released
    # for the log messages below.
    addrs = []
    unix_addr = None
    port = None
    for srv in servers_to_stop:
        for s in srv.sockets:
            addr = s.getsockname()
            if isinstance(addr, tuple):
                addrs.append(addr)
                if port is None:
                    port = addr[1]
                elif port != addr[1]:
                    # Mixed ports: fall back to per-address formatting.
                    port = 0
            else:
                # Non-tuple address: the admin Unix-domain socket path.
                unix_addr = addr

    if len(addrs) > 1:
        if port:
            addr_str = f"{{{', '.join(addr[0] for addr in addrs)}}}:{port}"
        else:
            addr_str = f"{{{', '.join('%s:%d' % addr for addr in addrs)}}}"
    elif addrs:
        addr_str = "%s:%d" % addrs[0]
    else:
        addr_str = None

    if addr_str:
        logger.info('Stopping to serve on %s', addr_str)
    if unix_addr:
        logger.info('Stopping to serve admin on %s', unix_addr)

    await self._stop_servers(servers_to_stop)
async def _on_before_drop_db(
self,
dbname: str,
current_dbname: str
) -> None:
if current_dbname == dbname:
raise errors.ExecutionError(
f'cannot drop the currently open database {dbname!r}')
await self._ensure_database_not_connected(dbname)
async def _on_before_create_db_from_template(
self,
dbname: str,
current_dbname: str
):
if current_dbname == dbname:
raise errors.ExecutionError(
f'cannot create database using currently open database '
f'{dbname!r} as a template database')
await self._ensure_database_not_connected(dbname)
async def _ensure_database_not_connected(self, dbname: str):
assert self._dbindex is not None
if self._dbindex.count_connections(dbname):
# If there are open EdgeDB connections to the `dbname` DB
# just raise the error Postgres would have raised itself.
raise errors.ExecutionError(
f'database {dbname!r} is being accessed by other users')
else:
# If, however, there are no open EdgeDB connections, prune
# all non-active postgres connection to the `dbname` DB.
await self._pg_pool.prune_inactive_connections(dbname)
def _on_after_drop_db(self, dbname: str):
    """Remove the just-dropped database *dbname* from the index."""
    try:
        assert self._dbindex is not None
        self._dbindex.unregister_db(dbname)
    except Exception:
        # Background-callback convention: count the error, then re-raise.
        metrics.background_errors.inc(1.0, 'on_after_drop_db')
        raise
async def _on_system_config_add(self, setting_name, value):
# CONFIGURE INSTANCE INSERT ConfigObject;
pass
async def _on_system_config_rem(self, setting_name, value):
# CONFIGURE INSTANCE RESET ConfigObject;
pass
async def _on_system_config_set(self, setting_name, value):
    # CONFIGURE INSTANCE SET setting_name := value;
    """React to an instance-level setting being set.

    Listen address/port changes rebind the TCP listeners; the idle
    timeout restarts the idle-connection GC.
    """
    try:
        if setting_name == 'listen_addresses':
            await self._restart_servers_new_addr(value, self._listen_port)

        elif setting_name == 'listen_port':
            await self._restart_servers_new_addr(self._listen_hosts, value)

        elif setting_name == 'session_idle_timeout':
            self._reinit_idle_gc_collector()

        self.schedule_reported_config_if_needed(setting_name)
    except Exception:
        metrics.background_errors.inc(1.0, 'on_system_config_set')
        raise
async def _on_system_config_reset(self, setting_name):
    # CONFIGURE INSTANCE RESET setting_name;
    """React to an instance-level setting being reset to its default."""
    try:
        if setting_name == 'listen_addresses':
            # Default listen address is localhost only.
            await self._restart_servers_new_addr(
                ('localhost',), self._listen_port)

        elif setting_name == 'listen_port':
            await self._restart_servers_new_addr(
                self._listen_hosts, defines.EDGEDB_PORT)

        elif setting_name == 'session_idle_timeout':
            self._reinit_idle_gc_collector()

        self.schedule_reported_config_if_needed(setting_name)
    except Exception:
        metrics.background_errors.inc(1.0, 'on_system_config_reset')
        raise
async def _after_system_config_add(self, setting_name, value):
    # CONFIGURE INSTANCE INSERT ConfigObject;
    """Refresh derived state after a config object was inserted."""
    try:
        if setting_name == 'auth':
            # Auth rules changed: rebuild the cached auth list.
            self._populate_sys_auth()
    except Exception:
        metrics.background_errors.inc(1.0, 'after_system_config_add')
        raise
async def _after_system_config_rem(self, setting_name, value):
    # CONFIGURE INSTANCE RESET ConfigObject;
    """Refresh derived state after a config object was removed."""
    try:
        if setting_name == 'auth':
            # Auth rules changed: rebuild the cached auth list.
            self._populate_sys_auth()
    except Exception:
        metrics.background_errors.inc(1.0, 'after_system_config_rem')
        raise
async def _after_system_config_set(self, setting_name, value):
# CONFIGURE INSTANCE SET setting_name := value;
pass
async def _after_system_config_reset(self, setting_name):
# CONFIGURE INSTANCE RESET setting_name;
pass
async def _acquire_sys_pgcon(self):
    """Acquire exclusive access to the system-database connection.

    Must be paired with _release_sys_pgcon().  Raises RuntimeError if
    the server is not serving or the connection cannot be restored.
    """
    if not self._initing and not self._serving:
        raise RuntimeError("EdgeDB server is not serving.")
    await self._sys_pgcon_waiter.acquire()
    # Re-check after the (possibly long) wait on the lock: the server
    # may have started shutting down in the meantime.
    if not self._initing and not self._serving:
        self._sys_pgcon_waiter.release()
        raise RuntimeError("EdgeDB server is not serving.")
    if self.__sys_pgcon is None or not self.__sys_pgcon.is_healthy():
        # Connection missing or unhealthy: abort it (if any) and wait
        # for the background reconnect task to restore it.
        conn, self.__sys_pgcon = self.__sys_pgcon, None
        if conn is not None:
            self._sys_pgcon_ready_evt.clear()
            conn.abort()
        # We depend on the reconnect on connection_lost() of __sys_pgcon
        await self._sys_pgcon_ready_evt.wait()
        if self.__sys_pgcon is None:
            self._sys_pgcon_waiter.release()
            raise RuntimeError("Cannot acquire pgcon to the system DB.")
    return self.__sys_pgcon
def _release_sys_pgcon(self):
self._sys_pgcon_waiter.release()
async def _cancel_pgcon_operation(self, pgcon) -> bool:
    """Cancel the query currently running on *pgcon*.

    Returns True iff the backend acknowledged the cancellation.
    """
    syscon = await self._acquire_sys_pgcon()
    try:
        if pgcon.idle:
            # pgcon could have received the query results while we
            # were acquiring a system connection to cancel it.
            return False

        if pgcon.is_cancelling():
            # Somehow the connection is already being cancelled and
            # we don't want to have two cancellations go in parallel.
            return False

        pgcon.start_pg_cancellation()
        try:
            # Returns True if the `pid` exists and it was able to send it a
            # SIGINT. Will throw an exception if the privileges aren't
            # sufficient.
            result = await syscon.simple_query(
                f'SELECT pg_cancel_backend({pgcon.backend_pid});'.encode(),
                ignore_data=False
            )
        finally:
            pgcon.finish_pg_cancellation()

        # Postgres returns a boolean as the literal 't' or 'f'.
        return result[0][0] == b't'
    finally:
        self._release_sys_pgcon()
async def _cancel_and_discard_pgcon(self, pgcon, dbname) -> None:
    """Cancel whatever runs on *pgcon*, then discard it from the pool."""
    try:
        # During shutdown there is no point in cancelling; just discard.
        if self._serving:
            await self._cancel_pgcon_operation(pgcon)
    finally:
        self.release_pgcon(dbname, pgcon, discard=True)
async def _signal_sysevent(self, event, **kwargs):
    """Broadcast *event* to all instances via the system database."""
    try:
        if not self._initing and not self._serving:
            # This is very likely if we are doing
            # "run_startup_script_and_exit()", but is also possible if the
            # server was shut down with this coroutine as a background task
            # in flight.
            return

        pgcon = await self._acquire_sys_pgcon()
        try:
            await pgcon.signal_sysevent(event, **kwargs)
        finally:
            self._release_sys_pgcon()
    except Exception:
        metrics.background_errors.inc(1.0, 'signal_sysevent')
        raise
def _on_remote_ddl(self, dbname):
# Triggered by a postgres notification event 'schema-changes'
# on the __edgedb_sysevent__ channel
async def task():
try:
await self.introspect_db(dbname)
except Exception:
metrics.background_errors.inc(1.0, 'on_remote_ddl')
raise
self.create_task(task(), interruptable=True)
def _on_remote_database_config_change(self, dbname):
# Triggered by a postgres notification event 'database-config-changes'
# on the __edgedb_sysevent__ channel
async def task():
try:
await self.introspect_db(dbname)
except Exception:
metrics.background_errors.inc(
1.0, 'on_remote_database_config_change')
raise
self.create_task(task(), interruptable=True)
def _on_local_database_config_change(self, dbname):
# Triggered by DB Index.
# It's easier and safer to just schedule full re-introspection
# of the DB and update all components of it.
async def task():
try:
await self.introspect_db(dbname)
except Exception:
metrics.background_errors.inc(
1.0, 'on_local_database_config_change')
raise
self.create_task(task(), interruptable=True)
def _on_remote_system_config_change(self):
# Triggered by a postgres notification event 'system-config-changes'
# on the __edgedb_sysevent__ channel
async def task():
try:
await self.reload_sys_config()
except Exception:
metrics.background_errors.inc(
1.0, 'on_remote_system_config_change')
raise
self.create_task(task(), interruptable=True)
def _on_global_schema_change(self):
async def task():
try:
await self._reintrospect_global_schema()
except Exception:
metrics.background_errors.inc(
1.0, 'on_global_schema_change')
raise
self.create_task(task(), interruptable=True)
def _on_sys_pgcon_connection_lost(self, exc):
    """Handle loss of the system-database connection.

    While serving, schedules a reconnect task; during shutdown it only
    unblocks any waiters.
    """
    try:
        if not self._serving:
            # The server is shutting down, release all events so that
            # the waiters if any could continue and exit
            self._sys_pgcon_ready_evt.set()
            self._sys_pgcon_reconnect_evt.set()
            return

        logger.error(
            "Connection to the system database is " +
            ("closed." if exc is None else f"broken! Reason: {exc}")
        )
        self.set_pg_unavailable_msg(
            "Connection is lost, please check server log for the reason."
        )
        self.__sys_pgcon = None
        self._sys_pgcon_ready_evt.clear()
        self.create_task(self._reconnect_sys_pgcon(), interruptable=True)
        # Also report the loss as a broken backend connection so the
        # adaptive-HA state machine (if any) can react.
        self._on_pgcon_broken(True)
    except Exception:
        metrics.background_errors.inc(1.0, 'on_sys_pgcon_connection_lost')
        raise
def _on_sys_pgcon_parameter_status_updated(self, name, value):
try:
if name == 'in_hot_standby' and value == 'on':
# It is a strong evidence of failover if the sys_pgcon receives
# a notification that in_hot_standby is turned on.
self._on_sys_pgcon_failover_signal()
except Exception:
metrics.background_errors.inc(
1.0, 'on_sys_pgcon_parameter_status_updated')
raise
def _on_sys_pgcon_failover_signal(self):
    """React to a backend failover signal observed on the system pgcon."""
    if not self._serving:
        return
    try:
        if self._backend_adaptive_ha is not None:
            # Switch to FAILOVER if adaptive HA is enabled
            self._backend_adaptive_ha.set_state_failover()
        elif getattr(self._cluster, '_ha_backend', None) is None:
            # If the server is not using an HA backend, nor has enabled the
            # adaptive HA monitoring, we still try to "switch over" by
            # disconnecting all pgcons if failover signal is received,
            # allowing reconnection to happen sooner.
            self.on_switch_over()
        # Else, the HA backend should take care of calling on_switch_over()
    except Exception:
        metrics.background_errors.inc(1.0, 'on_sys_pgcon_failover_signal')
        raise
def _on_pgcon_broken(self, is_sys_pgcon=False):
try:
if self._backend_adaptive_ha:
self._backend_adaptive_ha.on_pgcon_broken(is_sys_pgcon)
except Exception:
metrics.background_errors.inc(1.0, 'on_pgcon_broken')
raise
def _on_pgcon_lost(self):
try:
if self._backend_adaptive_ha:
self._backend_adaptive_ha.on_pgcon_lost()
except Exception:
metrics.background_errors.inc(1.0, 'on_pgcon_lost')
raise
async def _reconnect_sys_pgcon(self):
    """Keep retrying to restore the system-database connection.

    Loops until either the connection is re-established or the server
    stops serving.  Always sets _sys_pgcon_ready_evt on exit so that
    _acquire_sys_pgcon() waiters are unblocked.
    """
    try:
        conn = None
        while self._serving:
            try:
                conn = await self._pg_connect(defines.EDGEDB_SYSTEM_DB)
                break
            except (ConnectionError, TimeoutError):
                # Keep retrying as far as:
                #   1. The EdgeDB server is still serving,
                #   2. We still cannot connect to the Postgres cluster, or
                pass
            except pgcon_errors.BackendError as e:
                #   3. The Postgres cluster is still starting up, or the
                #      HA failover is still in progress
                if not (
                    e.code_is(pgcon_errors.ERROR_FEATURE_NOT_SUPPORTED) or
                    e.code_is(pgcon_errors.ERROR_CANNOT_CONNECT_NOW) or
                    e.code_is(pgcon_errors.ERROR_READ_ONLY_SQL_TRANSACTION)
                ):
                    # TODO: ERROR_FEATURE_NOT_SUPPORTED should be removed
                    # once PostgreSQL supports SERIALIZABLE in hot standbys
                    raise

            if self._serving:
                try:
                    # Retry after INTERVAL seconds, unless the event is set
                    # and we can retry immediately after the event.
                    await asyncio.wait_for(
                        self._sys_pgcon_reconnect_evt.wait(),
                        defines.SYSTEM_DB_RECONNECT_INTERVAL,
                    )
                    # But the event can only skip one INTERVAL.
                    self._sys_pgcon_reconnect_evt.clear()
                except asyncio.TimeoutError:
                    pass

        if not self._serving:
            # Shut down mid-retry: discard any connection we just made.
            if conn is not None:
                conn.abort()
            return

        logger.info("Successfully reconnected to the system database.")
        self.__sys_pgcon = conn
        self.__sys_pgcon.mark_as_system_db()
        # This await is meant to be after mark_as_system_db() because we
        # need the pgcon to be able to trigger another reconnect if its
        # connection is lost during this await.
        await self.__sys_pgcon.listen_for_sysevent()
        self.set_pg_unavailable_msg(None)
    finally:
        self._sys_pgcon_ready_evt.set()
async def run_startup_script_and_exit(self):
    """Run the script specified in *startup_script* and exit immediately"""
    if self._startup_script is None:
        raise AssertionError('startup script is not defined')
    await self._create_compiler_pool()
    try:
        await binary.EdgeConnection.run_script(
            server=self,
            database=self._startup_script.database,
            user=self._startup_script.user,
            script=self._startup_script.text,
        )
    finally:
        # Tear the compiler pool down even if the script failed.
        await self._destroy_compiler_pool()
async def _start_server(
    self, host: str, port: int
) -> Optional[asyncio.AbstractServer]:
    """Create one TCP listener on (host, port).

    Returns None (after logging a warning) when the socket cannot be
    created, so callers can tolerate partially-unavailable interfaces.
    """
    # PEP 8 (E731): use a def instead of assigning a lambda to a name —
    # behavior is identical, tracebacks get a real function name.
    def proto_factory():
        return protocol.HttpProtocol(
            self,
            self._sslctx,
            binary_endpoint_security=self._binary_endpoint_security,
            http_endpoint_security=self._http_endpoint_security,
        )

    try:
        return await self.__loop.create_server(
            proto_factory, host=host, port=port)
    except Exception as e:
        logger.warning(
            f"could not create listen socket for '{host}:{port}': {e}"
        )
        return None
async def _start_admin_server(self, port: int) -> asyncio.AbstractServer:
    """Start the local admin server on a Unix-domain socket.

    The socket path encodes *port* so multiple instances sharing a
    runstate dir do not collide.
    """
    admin_unix_sock_path = os.path.join(
        self._runstate_dir, f'.s.EDGEDB.admin.{port}')
    assert len(admin_unix_sock_path) <= (
        defines.MAX_RUNSTATE_DIR_PATH
        + defines.MAX_UNIX_SOCKET_PATH_LENGTH
        + 1
    ), "admin Unix socket length exceeds maximum allowed"
    admin_unix_srv = await self.__loop.create_unix_server(
        lambda: binary.EdgeConnection(self, external_auth=True),
        admin_unix_sock_path
    )
    # Owner read/write only — presumably access control for this socket
    # is delegated to file permissions (external_auth=True); confirm.
    os.chmod(admin_unix_sock_path, stat.S_IRUSR | stat.S_IWUSR)
    logger.info('Serving admin on %s', admin_unix_sock_path)
    return admin_unix_srv
async def _start_servers(self, hosts, port, admin=True):
    """Start TCP listeners on every host in *hosts*.

    Returns (servers mapping, actual port, bound socket addresses).
    Raises StartupError if no socket could be bound at all.
    """
    servers = {}
    if port == 0:
        # Automatic port selection requires us to start servers
        # sequentially until we get a working bound socket to ensure
        # consistent port value across all requested listen addresses.
        try:
            for host in hosts:
                server = await self._start_server(host, port)
                if server is not None:
                    if port == 0:
                        # Adopt the port the first bound server got.
                        port = server.sockets[0].getsockname()[1]
                    servers[host] = server
        except Exception:
            await self._stop_servers(servers.values())
            raise
    else:
        # Fixed port: all listeners can be started concurrently.
        start_tasks = {}
        try:
            async with taskgroup.TaskGroup() as g:
                for host in hosts:
                    start_tasks[host] = g.create_task(
                        self._start_server(host, port)
                    )
        except Exception:
            # Stop the listeners that did come up before re-raising.
            await self._stop_servers([
                fut.result() for fut in start_tasks.values()
                if (
                    fut.done()
                    and fut.exception() is None
                    and fut.result() is not None
                )
            ])
            raise

        servers.update({
            host: fut.result()
            for host, fut in start_tasks.items()
            if fut.result() is not None
        })

    # Individual per-host failures were already logged by
    # _start_server(); fail hard only when nothing could be bound.
    if not servers:
        raise StartupError("could not create any listen sockets")

    addrs = []
    for tcp_srv in servers.values():
        for s in tcp_srv.sockets:
            addrs.append(s.getsockname())

    if len(addrs) > 1:
        if port:
            addr_str = f"{{{', '.join(addr[0] for addr in addrs)}}}:{port}"
        else:
            addr_str = f"""{{{', '.join(
                f'{addr[0]}:{addr[1]}' for addr in addrs)}}}"""
    elif addrs:
        addr_str = f'{addrs[0][0]}:{addrs[0][1]}'
        port = addrs[0][1]
    else:
        addr_str = None

    if addr_str:
        logger.info('Serving on %s', addr_str)

    if admin and port:
        try:
            admin_unix_srv = await self._start_admin_server(port)
        except Exception:
            await self._stop_servers(servers.values())
            raise
        servers[ADMIN_PLACEHOLDER] = admin_unix_srv

    return servers, port, addrs
def init_tls(
    self,
    tls_cert_file,
    tls_key_file,
    tls_cert_newly_generated,
):
    """Load the TLS certificate chain and initialize the SSL context.

    Translates the most common certificate-loading failures into
    actionable StartupError messages.
    """
    assert self._sslctx is None
    tls_password_needed = False

    def _tls_private_key_password():
        # Called by OpenSSL only when the key is actually encrypted;
        # remember that fact so the error reporting below can use it.
        nonlocal tls_password_needed
        tls_password_needed = True
        return os.environ.get('EDGEDB_SERVER_TLS_PRIVATE_KEY_PASSWORD', '')

    sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    try:
        sslctx.load_cert_chain(
            tls_cert_file,
            tls_key_file,
            password=_tls_private_key_password,
        )
    except ssl.SSLError as e:
        if e.library == "SSL" and e.errno == 9:  # ERR_LIB_PEM
            if tls_password_needed:
                if _tls_private_key_password():
                    raise StartupError(
                        "Cannot load TLS certificates - it's likely that "
                        "the private key password is wrong."
                    ) from e
                else:
                    raise StartupError(
                        "Cannot load TLS certificates - the private key "
                        "file is likely protected by a password. Specify "
                        "the password using environment variable: "
                        "EDGEDB_SERVER_TLS_PRIVATE_KEY_PASSWORD"
                    ) from e
            elif tls_key_file is None:
                raise StartupError(
                    "Cannot load TLS certificates - have you specified "
                    "the private key file using the `--tls-key-file` "
                    "command-line argument?"
                ) from e
            else:
                # Fix: chain the original error (`from e`) like the
                # sibling branches, so the cause isn't lost.
                raise StartupError(
                    "Cannot load TLS certificates - please double check "
                    "if the specified certificate files are valid."
                ) from e
        elif e.library == "X509" and e.errno == 116:
            # X509 Error 116: X509_R_KEY_VALUES_MISMATCH
            # Fix: chain the original error (`from e`) here too.
            raise StartupError(
                "Cannot load TLS certificates - the private key doesn't "
                "match the certificate."
            ) from e

        raise StartupError(f"Cannot load TLS certificates - {e}") from e

    sslctx.set_alpn_protocols(['edgedb-binary', 'http/1.1'])
    self._sslctx = sslctx
    self._tls_cert_file = str(tls_cert_file)
    self._tls_cert_newly_generated = tls_cert_newly_generated
async def _stop_servers(self, servers):
    """Close all *servers* and wait until they are fully shut down."""
    async with taskgroup.TaskGroup() as g:
        for server in servers:
            server.close()
            g.create_task(server.wait_closed())
async def start(self):
    """Bring the server fully online and begin accepting connections."""
    self._stop_evt.clear()
    assert self._task_group is None
    self._task_group = taskgroup.TaskGroup()
    await self._task_group.__aenter__()
    self._accept_new_tasks = True

    self._http_request_logger = self.create_task(
        self._request_stats_logger(), interruptable=True
    )

    await self._cluster.start_watching(self)
    await self._create_compiler_pool()

    if self._startup_script:
        # Run the optional startup script before opening the listeners.
        await binary.EdgeConnection.run_script(
            server=self,
            database=self._startup_script.database,
            user=self._startup_script.user,
            script=self._startup_script.text,
        )

    self._servers, actual_port, listen_addrs = await self._start_servers(
        await _resolve_interfaces(self._listen_hosts),
        self._listen_port,
    )
    # Record what was actually bound (port 0 means it was auto-selected).
    self._listen_hosts = listen_addrs
    self._listen_port = actual_port

    self._accepting_connections = True
    self._serving = True

    if self._echo_runtime_info:
        # Machine-readable line announcing the runtime parameters.
        ri = {
            "port": self._listen_port,
            "runstate_dir": str(self._runstate_dir),
            "tls_cert_file": self._tls_cert_file,
        }
        print(f'\nEDGEDB_SERVER_DATA:{json.dumps(ri)}\n', flush=True)

    for status_sink in self._status_sinks:
        status = {
            "listen_addrs": listen_addrs,
            "port": self._listen_port,
            "socket_dir": str(self._runstate_dir),
            "main_pid": os.getpid(),
            "tenant_id": self._tenant_id,
            "tls_cert_file": self._tls_cert_file,
            "tls_cert_newly_generated": self._tls_cert_newly_generated,
        }
        status_sink(f'READY={json.dumps(status)}')
async def stop(self):
    """Gracefully shut the server down, closing all connections."""
    try:
        self._serving = False
        self._accept_new_tasks = False

        if self._idle_gc_handler is not None:
            self._idle_gc_handler.cancel()
            self._idle_gc_handler = None

        self._cluster.stop_watching()
        if self._http_request_logger is not None:
            self._http_request_logger.cancel()

        await self._stop_servers(self._servers.values())
        self._servers = {}

        for conn in self._binary_conns:
            conn.stop()
        self._binary_conns.clear()

        if self._task_group is not None:
            tg = self._task_group
            self._task_group = None
            # Wait for non-interruptable background tasks to finish.
            await tg.__aexit__(*sys.exc_info())

        await self._destroy_compiler_pool()
    finally:
        # Drop the system connection and its lock for good, even if any
        # of the shutdown steps above failed.
        if self.__sys_pgcon is not None:
            self.__sys_pgcon.terminate()
            self.__sys_pgcon = None
        self._sys_pgcon_waiter = None
def create_task(self, coro, *, interruptable):
    """Schedule *coro* as a server background task.

    Interruptable tasks are regular asyncio tasks that may be interrupted
    randomly in the middle when the event loop stops; while tasks with
    interruptable=False are always awaited before the server stops, so
    that e.g. all finally blocks get a chance to execute in those tasks.
    """
    if self._accept_new_tasks:
        if interruptable:
            return self.__loop.create_task(coro)
        else:
            return self._task_group.create_task(coro)
    else:
        # Silence the "coroutine not awaited" warning
        logger.debug(
            "Task is not started and ignored: %r", coro.cr_code.co_name
        )
        # NOTE(review): __await__() returns an iterator without running
        # the coroutine; it's not obvious this suppresses the "never
        # awaited" RuntimeWarning (coro.close() would) — confirm.
        coro.__await__()
async def serve_forever(self):
    """Block until the server is told to stop."""
    await self._stop_evt.wait()
async def get_auth_method(self, user):
    """Return the auth method configured for *user* (first match wins)."""
    for auth in (self._sys_auth or ()):
        if user in auth.user or '*' in auth.user:
            return auth.method
    # No explicit rule matched: fall back to the default method.
    auth_type = config.get_settings().get_type_by_name(
        self._default_auth_method)
    return auth_type()
def get_sys_query(self, key):
    """Return the prepared system query registered under *key*."""
    return self._sys_queries[key]
def get_instance_data(self, key):
    """Return the instance-metadata value stored under *key*."""
    return self._instance_data[key]
def get_backend_runtime_params(self) -> Any:
    """Return the backend cluster's runtime parameters."""
    return self._cluster.get_runtime_params()
def set_pg_unavailable_msg(self, msg):
    """Record *msg* as the backend-unavailable reason.

    Only the first reason is kept; passing None clears it.
    """
    if msg is not None and self._pg_unavailable_msg is not None:
        # A reason is already recorded; keep the first one.
        return
    self._pg_unavailable_msg = msg
def on_switch_over(self):
    """React to a backend HA switch-over to a new master."""
    # Bumping this serial counter will "cancel" all pending connections
    # to the old master.
    self._ha_master_serial += 1
    self.create_task(
        self._pg_pool.prune_all_connections(), interruptable=True
    )
    if self.__sys_pgcon is None:
        # Assume a reconnect task is already running, now that we know the
        # new master is likely ready, let's just give the task a push.
        self._sys_pgcon_reconnect_evt.set()
    else:
        # Brutally close the sys_pgcon to the old master - this should
        # trigger a reconnect task.
        self.__sys_pgcon.abort()
    if self._backend_adaptive_ha is not None:
        # Switch to FAILOVER if adaptive HA is enabled
        self._backend_adaptive_ha.set_state_failover(
            call_on_switch_over=False
        )
def get_active_pgcon_num(self) -> int:
    """Return the number of backend connections currently in active use."""
    pending = self._pg_pool.get_pending_conns()
    return self._pg_pool.current_capacity - pending
def get_debug_info(self):
    """Used to render the /server-info endpoint in dev/test modes.

    Some tests depend on the exact layout of the returned structure.
    """
    def serialize_config(cfg):
        # Flatten a config mapping into {setting name: raw value}.
        return {name: value.value for name, value in cfg.items()}

    obj = dict(
        params=dict(
            max_backend_connections=self._max_backend_connections,
            suggested_client_pool_size=self._suggested_client_pool_size,
            tenant_id=self._tenant_id,
            dev_mode=self._devmode,
            test_mode=self._testmode,
            default_auth_method=self._default_auth_method,
            listen_hosts=self._listen_hosts,
            listen_port=self._listen_port,
        ),
        instance_config=serialize_config(self._dbindex.get_sys_config()),
        user_roles=self._roles,
        pg_addr=self._pg_addr,
        pg_pool=self._pg_pool._build_snapshot(now=time.monotonic()),
        compiler_pool=dict(
            worker_pids=list(self._compiler_pool._workers.keys()),
            template_pid=self._compiler_pool.get_template_pid(),
        ),
    )

    # Per-database view: config, extensions and live connection state.
    dbs = {}
    for db in self._dbindex.iter_dbs():
        dbs[db.name] = dict(
            name=db.name,
            dbver=db.dbver,
            config=serialize_config(db.db_config),
            extensions=list(db.extensions.keys()),
            query_cache_size=db.get_query_cache_size(),
            connections=[
                dict(
                    in_tx=view.in_tx(),
                    in_tx_error=view.in_tx_error(),
                    config=serialize_config(view.get_session_config()),
                    module_aliases=view.get_modaliases(),
                )
                for view in db.iter_views()
            ],
        )

    obj['databases'] = dbs

    return obj
def _cleanup_wildcard_addrs(
hosts: Sequence[str]
) -> tuple[list[str], list[str]]:
"""Filter out conflicting addresses in presence of INADDR_ANY wildcards.
Attempting to bind to 0.0.0.0 (or ::) _and_ a non-wildcard address will
usually result in EADDRINUSE. To avoid this, filter out all specific
addresses if a wildcard is present in the *hosts* sequence.
Returns a tuple: first element is the new list of hosts, second
element is a list of rejected host addrs/names.
"""
ipv4_hosts = set()
ipv6_hosts = set()
named_hosts = set()
ipv4_wc = ipaddress.ip_address('0.0.0.0')
ipv6_wc = ipaddress.ip_address('::')
for host in hosts:
if host == "*":
ipv4_hosts.add(ipv4_wc)
ipv6_hosts.add(ipv6_wc)
continue
try:
ip = ipaddress.IPv4Address(host)
except ValueError:
pass
else:
ipv4_hosts.add(ip)
continue
try:
ip6 = ipaddress.IPv6Address(host)
except ValueError:
pass
else:
ipv6_hosts.add(ip6)
continue
named_hosts.add(host)
if not ipv4_hosts and not ipv6_hosts:
return (list(hosts), [])
if ipv4_wc not in ipv4_hosts and ipv6_wc not in ipv6_hosts:
return (list(hosts), [])
if ipv4_wc in ipv4_hosts and ipv6_wc in ipv6_hosts:
return (
['0.0.0.0', '::'],
[str(a) for a in
((named_hosts | ipv4_hosts | ipv6_hosts) - {ipv4_wc, ipv6_wc})]
)
if ipv4_wc in ipv4_hosts:
return (
[str(a) for a in ({ipv4_wc} | ipv6_hosts)],
[str(a) for a in ((named_hosts | ipv4_hosts) - {ipv4_wc})]
)
if ipv6_wc in ipv6_hosts:
return (
[str(a) for a in ({ipv6_wc} | ipv4_hosts)],
[str(a) for a in ((named_hosts | ipv6_hosts) - {ipv6_wc})]
)
raise AssertionError('unreachable')
async def _resolve_host(host: str) -> list[str] | Exception:
loop = asyncio.get_running_loop()
try:
addrinfo = await loop.getaddrinfo(
None if host == '*' else host,
0,
family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM,
flags=socket.AI_PASSIVE,
)
except Exception as e:
return e
else:
return [addr[4][0] for addr in addrinfo]
async def _resolve_interfaces(hosts: Sequence[str]) -> Sequence[str]:
    """Resolve *hosts* concurrently and drop wildcard-conflicting addrs."""
    async with taskgroup.TaskGroup() as g:
        resolve_tasks = {
            host: g.create_task(_resolve_host(host))
            for host in hosts
        }

    addrs = []
    for host, fut in resolve_tasks.items():
        result = fut.result()
        if isinstance(result, Exception):
            # Resolution failures are logged and skipped, not fatal.
            logger.warning(
                f"could not translate host name {host!r} to address: {result}")
        else:
            addrs.extend(result)

    clean_addrs, rejected_addrs = _cleanup_wildcard_addrs(addrs)
    if rejected_addrs:
        logger.warning(
            "wildcard addresses found in listen_addresses; " +
            "discarding the other addresses: " +
            ", ".join(repr(h) for h in rejected_addrs)
        )
    return clean_addrs
| 35.853982
| 79
| 0.584274
|
acfd8e119d81eb59ad75842370d40be24705decb
| 1,293
|
py
|
Python
|
sickbeard/lib/guessit/transfo/guess_website.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/lib/guessit/transfo/guess_website.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/lib/guessit/transfo/guess_website.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | 1
|
2021-08-29T18:32:43.000Z
|
2021-08-29T18:32:43.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.transfo import SingleNodeGuesser
from guessit.patterns import websites
import logging
log = logging.getLogger(__name__)
def guess_website(string):
    """Return ({'website': name}, (start, end)) for the first known website
    found in *string* (case-insensitive), or (None, None)."""
    lowered = string.lower()
    for candidate in websites:
        start = lowered.find(candidate.lower())
        if start >= 0:
            return {'website': candidate}, (start, start + len(candidate))
    return None, None
def process(mtree):
    """Run the website guesser over *mtree* with confidence 1.0."""
    SingleNodeGuesser(guess_website, 1.0, log).process(mtree)
| 32.325
| 74
| 0.730085
|
acfd8e9997ee375d26d5da554edcb26dbc670e3e
| 4,172
|
py
|
Python
|
tests/core/cmds/test_wallet.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
tests/core/cmds/test_wallet.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
tests/core/cmds/test_wallet.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Dict, Optional, Tuple
import pytest
from chinilla.cmds.wallet_funcs import print_offer_summary
from chinilla.types.blockchain_format.sized_bytes import bytes32
from chinilla.util.ints import uint32
# Synthetic 32-byte (hex) CAT asset ids used throughout these tests.
TEST_DUCKSAUCE_ASSET_ID = "1000000000000000000000000000000000000000000000000000000000000001"
TEST_CRUNCHBERRIES_ASSET_ID = "1000000000000000000000000000000000000000000000000000000000000002"
TEST_UNICORNTEARS_ASSET_ID = "1000000000000000000000000000000000000000000000000000000000000003"

# Maps each asset id to the (wallet id, display name) pair the resolver
# should report for it.
TEST_ASSET_ID_NAME_MAPPING: Dict[bytes32, Tuple[uint32, str]] = {
    bytes32.from_hexstr(TEST_DUCKSAUCE_ASSET_ID): (uint32(2), "DuckSauce"),
    bytes32.from_hexstr(TEST_CRUNCHBERRIES_ASSET_ID): (uint32(3), "CrunchBerries"),
    bytes32.from_hexstr(TEST_UNICORNTEARS_ASSET_ID): (uint32(4), "UnicornTears"),
}
async def cat_name_resolver(asset_id: bytes32) -> Optional[Tuple[Optional[uint32], str]]:
    """Static test resolver: map *asset_id* to (wallet id, name) or None."""
    return TEST_ASSET_ID_NAME_MAPPING.get(asset_id)
@pytest.mark.asyncio
async def test_print_offer_summary_hcx(capsys: Any) -> None:
    """A plain HCX amount is rendered in coins with its vojo count."""
    offer = {"hcx": 1_000_000_000_000}
    await print_offer_summary(cat_name_resolver, offer)
    output = capsys.readouterr().out
    assert "HCX (Wallet ID: 1): 1.0 (1000000000000 vojos)" in output
@pytest.mark.asyncio
async def test_print_offer_summary_cat(capsys: Any) -> None:
    """A known CAT is rendered with its resolved wallet id and name."""
    offer = {TEST_DUCKSAUCE_ASSET_ID: 1_000}
    await print_offer_summary(cat_name_resolver, offer)
    output = capsys.readouterr().out
    assert "DuckSauce (Wallet ID: 2): 1.0 (1000 vojos)" in output
@pytest.mark.asyncio
async def test_print_offer_summary_multiple_cats(capsys: Any) -> None:
    """Several CATs in one offer are each rendered on their own line."""
    offer = {
        TEST_DUCKSAUCE_ASSET_ID: 1_000,
        TEST_CRUNCHBERRIES_ASSET_ID: 2_000,
    }
    await print_offer_summary(cat_name_resolver, offer)
    output = capsys.readouterr().out
    assert "DuckSauce (Wallet ID: 2): 1.0 (1000 vojos)" in output
    assert "CrunchBerries (Wallet ID: 3): 2.0 (2000 vojos)" in output
@pytest.mark.asyncio
async def test_print_offer_summary_hcx_and_cats(capsys: Any) -> None:
    """Mixed HCX + CAT offers render every entry with its own units."""
    offer = {
        "hcx": 2_500_000_000_000,
        TEST_DUCKSAUCE_ASSET_ID: 1_111,
        TEST_CRUNCHBERRIES_ASSET_ID: 2_222,
        TEST_UNICORNTEARS_ASSET_ID: 3_333,
    }
    await print_offer_summary(cat_name_resolver, offer)
    output = capsys.readouterr().out
    assert "HCX (Wallet ID: 1): 2.5 (2500000000000 vojos)" in output
    assert "DuckSauce (Wallet ID: 2): 1.111 (1111 vojos)" in output
    assert "CrunchBerries (Wallet ID: 3): 2.222 (2222 vojos)" in output
    assert "UnicornTears (Wallet ID: 4): 3.333 (3333 vojos)" in output
@pytest.mark.asyncio
async def test_print_offer_summary_hcx_and_cats_with_zero_values(capsys: Any) -> None:
    """Zero amounts still render as '0.0 (0 vojos)' for every asset."""
    offer = {
        "hcx": 0,
        TEST_DUCKSAUCE_ASSET_ID: 0,
        TEST_CRUNCHBERRIES_ASSET_ID: 0,
        TEST_UNICORNTEARS_ASSET_ID: 0,
    }
    await print_offer_summary(cat_name_resolver, offer)
    output = capsys.readouterr().out
    assert "HCX (Wallet ID: 1): 0.0 (0 vojos)" in output
    assert "DuckSauce (Wallet ID: 2): 0.0 (0 vojos)" in output
    assert "CrunchBerries (Wallet ID: 3): 0.0 (0 vojos)" in output
    assert "UnicornTears (Wallet ID: 4): 0.0 (0 vojos)" in output
@pytest.mark.asyncio
async def test_print_offer_summary_cat_with_fee_and_change(capsys: Any) -> None:
    """With has_fee=True, unknown entries are labelled as fee change."""
    offer = {
        TEST_DUCKSAUCE_ASSET_ID: 1_000,
        "unknown": 3_456,
    }
    await print_offer_summary(cat_name_resolver, offer, has_fee=True)
    output = capsys.readouterr().out
    assert "DuckSauce (Wallet ID: 2): 1.0 (1000 vojos)" in output
    assert "Unknown: 3456 vojos [Typically represents change returned from the included fee]" in output
@pytest.mark.asyncio
async def test_print_offer_summary_hcx_with_one_vojo(capsys: Any) -> None:
    """The smallest HCX unit renders in scientific notation, singular."""
    offer = {"hcx": 1}
    await print_offer_summary(cat_name_resolver, offer)
    output = capsys.readouterr().out
    assert "HCX (Wallet ID: 1): 1e-12 (1 vojo)" in output
| 33.376
| 110
| 0.740892
|
acfd8f25c4b55f44802663c5a6f903b61d51f5cc
| 70,259
|
py
|
Python
|
barista-scan/tools/scancode-toolkit/src/cluecode/copyrights.py
|
vsurge/barista
|
391069988cf77353246c59a55b97dc8622a1128b
|
[
"Apache-2.0"
] | 55
|
2020-01-25T00:29:01.000Z
|
2022-02-04T04:52:35.000Z
|
barista-scan/tools/scancode-toolkit/src/cluecode/copyrights.py
|
vsurge/barista
|
391069988cf77353246c59a55b97dc8622a1128b
|
[
"Apache-2.0"
] | 231
|
2020-01-27T21:33:23.000Z
|
2022-03-23T21:27:24.000Z
|
barista-scan/tools/scancode-toolkit/src/cluecode/copyrights.py
|
vsurge/barista
|
391069988cf77353246c59a55b97dc8622a1128b
|
[
"Apache-2.0"
] | 18
|
2020-01-27T17:14:11.000Z
|
2022-03-09T02:33:40.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from collections import deque
import os
import re
from commoncode.text import toascii
from commoncode.text import unixlinesep
from cluecode import copyrights_hint
from textcode import analysis
# Tracing flags.
# TRACE enables copyright-detection tracing; it is normally driven by the
# SCANCODE_DEBUG_COPYRIGHT environment variable but can be forced on by
# editing this line.
TRACE = False or os.environ.get('SCANCODE_DEBUG_COPYRIGHT', False)

# TRACE_DEEP (when 1) enables deep nltk tracing, toggled via the
# SCANCODE_DEBUG_COPYRIGHT_DEEP environment variable.
TRACE_DEEP = 1 if os.environ.get('SCANCODE_DEBUG_COPYRIGHT_DEEP') else 0
# Tracing helpers: logger_debug is a no-op unless tracing is enabled.
def logger_debug(*args):
    # Default no-op; rebound below to a real logging call when tracing is on.
    pass


if TRACE or TRACE_DEEP:
    import logging
    import sys

    logger = logging.getLogger(__name__)
    logging.basicConfig(stream=sys.stdout)
    logger.setLevel(logging.DEBUG)

    def logger_debug(*args):
        # Join all args into one space-separated message: string args are
        # used as-is, everything else goes through repr().
        # NOTE(review): `unicode` only exists on Python 2 -- on Python 3 this
        # branch would raise NameError; confirm the supported Python versions.
        return logger.debug(' '.join(isinstance(a, (unicode, str)) and a or repr(a) for a in args))
"""
Detect and collect copyright statements.
The process consists in:
- prepare and cleanup text
- identify regions of text that may contain copyright (using hints)
- tag the text for parts-of-speech (POS) to identify various copyright
statements parts such as dates, names ("named entities"), etc. This is done
using NLTK POS tagging
- feed the tagged text to a parsing grammar describing actual copyright
statements
- yield copyright statements,holder and authors with start and end line
from the parse tree with some post-detection cleanups.
"""
def detect_copyrights(location, copyrights=True, holders=True, authors=True, include_years=True):
    """
    Yield tuples of (detection type, detected string, start line, end line)
    detected in the file at `location`.

    Valid detection types are: copyrights, authors, holders. These are
    included in the yielded tuples based on the values of the `copyrights`,
    `holders` and `authors` flags.
    Include years in copyrights if `include_years` is True.
    """
    detector = CopyrightDetector()
    # Materialize once: the lines are iterated twice below (trace + detection).
    numbered_lines = list(analysis.numbered_text_lines(location, demarkup=True))
    if TRACE:
        for nl in numbered_lines:
            logger_debug('numbered_line:', repr(nl))
    for candidates in candidate_lines(numbered_lines):
        for detection in detector.detect(candidates, copyrights, holders, authors, include_years):
            # each detection is a tuple of (type, string, start line, end line)
            yield detection
_YEAR = (r'('
'19[6-9][0-9]' # 1960 to 1999
'|'
'20[0-1][0-9]' # 2000 to 2019
')')
_YEAR_SHORT = (r'('
'[6-9][0-9]' # 19-60 to 19-99
'|'
'[0-1][0-9]' # 20-00 to 20-19
')')
_YEAR_YEAR = (r'('
# fixme v ....the underscore below is suspicious
'19[6-9][0-9][\.,\-]_[6-9][0-9]' # 1960-99
'|'
'19[6-9][0-9][\.,\-]+[0-9]' # 1998-9
'|'
'20[0-1][0-9][\.,\-]+[0-1][0-9]' # 2001-16 or 2012-04
'|'
'200[0-9][\.,\-]+[0-9]' # 2001-4 not 2012
')')
_PUNCT = (r'('
'['
'\W' # not a word (word includes underscore)
'\D' # not a digit
'\_' # underscore
'i' # oddity
'\?'
']'
'|'
'\ ' # html entity sometimes are double escaped
')*') # repeated 0 or more times
_YEAR_PUNCT = _YEAR + _PUNCT
_YEAR_YEAR_PUNCT = _YEAR_YEAR + _PUNCT
_YEAR_SHORT_PUNCT = _YEAR_SHORT + _PUNCT
_YEAR_OR_YEAR_YEAR_WITH_PUNCT = (r'(' +
_YEAR_PUNCT +
'|' +
_YEAR_YEAR_PUNCT +
')')
_YEAR_THEN_YEAR_SHORT = (r'(' +
_YEAR_OR_YEAR_YEAR_WITH_PUNCT +
'(' +
_YEAR_SHORT_PUNCT +
')*' +
')')
pats = [
_YEAR,
_YEAR_SHORT,
_YEAR_YEAR,
_PUNCT,
_YEAR_OR_YEAR_YEAR_WITH_PUNCT
]
# FIXME: multi-tokens patterns are likely not behaving as expected
# FIXME: patterns could be greatly simplified
# Ordered (regex, tag) pairs used to tag each candidate text token with a
# part-of-speech-like label before grammar parsing. Tags seen below include:
# JUNK (ignore), NN (plain noun), NNP (proper noun), CAPS, COPY (copyright
# word/symbol), COMP (company suffix), UNI (university), YR/YR-PLUS (years),
# EMAIL, URL, CC (conjunction), BY, OF, etc. The last catch-all pattern tags
# everything else as NN.
# NOTE(review): presumably consumed by an NLTK regexp tagger where earlier
# patterns take precedence -- confirm against the tagger setup elsewhere.
patterns = [
    # TODO: this needs to be simplified:
    # TODO: in NLTK 3.0 this will fail because of this bug:
    # https://github.com/nltk/nltk/issues/1025
    # a single comma is not an NNP
    (r'^,$', 'CC'),
    # JUNK are things to ignore
    # All Rights Reserved. should be a terminator/delimiter.
    (r'^([Aa]ll [Rr]ights? [Rr]eserved|ALL RIGHTS? RESERVED|[Aa]ll|ALL)$', 'JUNK'),
    (r'^([Rr]eserved|RESERVED)[,]?$', 'JUNK'),
    # found in crypto certificates and LDAP
    (r'^(O=?|OU=?|XML)$', 'JUNK'),
    (r'^(Parser|Dual|Crypto|NO|PART|[Oo]riginall?y?|[Rr]epresentations?\.?)$', 'JUNK'),
    (r'^(Refer|Apt|Agreement|Usage|Please|Based|Upstream|Files?|Filename:?|'
     r'Description:?|[Pp]rocedures?|You|Everyone)$', 'JUNK'),
    (r'^(Rights?|Unless|rant|Subject|Acknowledgements?|Special)$', 'JUNK'),
    (r'^(LICEN[SC]E[EDS]?|Licen[sc]e[eds]?)$', 'TOIGNORE'),
    (r'^(Derivative|[Ll]icensable|[Ss]ince|[Ll]icen[cs]e[\.d]?|'
     r'[Ll]icen[cs]ors?|under)$', 'JUNK'),
    (r'^(TCK|Use|[Rr]estrictions?|[Ii]ntrodu`ction)$', 'JUNK'),
    (r'^([Ii]ncludes?|[Vv]oluntary|[Cc]ontributions?|[Mm]odifications?)$', 'JUNK'),
    (r'^(Company:|For|File|Last|[Rr]eleased?|[Cc]opyrighting)$', 'JUNK'),
    (r'^Authori.*$', 'JUNK'),
    (r'^[Bb]uild$', 'JUNK'),
    (r'^[Ss]tring$', 'JUNK'),
    (r'^Implementation-Vendor$', 'JUNK'),
    (r'^(dnl|rem|REM)$', 'JUNK'),
    (r'^Implementation-Vendor$', 'JUNK'),
    (r'^Supports|Separator$', 'JUNK'),
    (r'^\.byte|Idata$', 'JUNK'),
    (r'^[Cc]ontributed?$', 'JUNK'),
    (r'^[Ff]unctions?$', 'JUNK'),
    (r'^[Nn]otices?|[Mm]ust$', 'JUNK'),
    (r'^ISUPPER?|ISLOWER$', 'JUNK'),
    (r'^AppPublisher$', 'JUNK'),
    (r'^DISCLAIMS?|SPECIFICALLY|WARRANT(Y|I)E?S?$', 'JUNK'),
    (r'^(hispagestyle|Generic|Change|Add|Generic|Average|Taken|LAWS\.?|design|Driver)$', 'JUNK'),
    (r'^[Cc]ontribution\.?', 'JUNK'),
    (r'(DeclareUnicodeCharacter|Language-Team|Last-Translator|OMAP730|Law\.)$', 'JUNK'),
    (r'^dylid|BeOS|Generates?|Thanks?', 'JUNK'),
    # various programming constructs
    (r'^(var|this|return|function|thats?|xmlns|file)$', 'JUNK'),
    (r'^(([A-Z][a-z]+){3,}[A-Z]+[,]?)$', 'JUNK'),
    (r'^(([A-Z][a-z]+){3,}[A-Z]+[0-9]+[,]?)$', 'JUNK'),
    # multiple parens (at least two (x) groups) is a sign of junk
    # such as in (1)(ii)(OCT
    (r'^.*\(.*\).*\(.*\).*$', 'JUNK'),
    # neither and nor conjunctions and some common licensing words are NOT part
    # of a copyright statement
    (r'^(neither|nor|providing|Execute|NOTICE|passes|LAWS\,?|Should'
     r'|Licensing|Disclaimer|Law|Some|Derived|Limitations?|Nothing|Policy'
     r'|available|Recipient\.?|LICENSEE|Application|Receiving|Party|interfaces'
     r'|owner|Sui|Generis|Conditioned|Disclaimer|Warranty|Represents|Sufficient|Each'
     r'|Partially|Limitation|Liability|Named|Use.|EXCEPT|OWNER\.?|Comments\.?'
     r')$', 'JUNK'),
    # various trailing words that are junk
    (r'^(?:Copyleft|LegalCopyright|AssemblyCopyright|Distributed|Report|'
     r'Available|true|false|node|jshint|node\':true|node:true|this|Act,?|'
     r'[Ff]unctionality|bgcolor|F+|Rewrote|Much|remains?,?|Implementation|earlier'
     r'|al.|is|[lL]aws?|Insert|url|[Ss]ee|[Pp]ackage\.?|'
     r'|Covered|date|practices'
     r'|fprintf.*'
     r'|CURDIR|Environment/Libraries|Environment/Base'
     r')$', 'JUNK'),
    # some copyright templates in licenses
    (r'^\$(date-of-software|date-of-document)$', 'JUNK'),
    # NOT A CAPS
    # [YEAR] W3C® (MIT, ERCIM, Keio, Beihang)."
    (r'^YEAR', 'NN'),
    # RCS keywords
    (r'^(Header|Id|Locker|Log|RCSfile|Revision)$', 'NN'),
    # this trigger otherwise "copyright ownership. The ASF" in Apache license headers
    (r'^([Oo]wnership\.?)$', 'JUNK'),
    # names with a slash that are NNP
    # Research/Unidata , LCS/Telegraphics.
    (r'^([A-Z]([a-z]|[A-Z])+/[A-Z][a-z]+[\.,]?)$', 'NNP'),
    # with a comma, always CAPS (MIT alone is too error prone to be always tagged as CAPS
    (r'^MIT,$', 'CAPS'),
    # Various NN, exceptions to NNP or CAPS
    (r'^(Send|It|Mac|Support|Information|Various|Mouse|Wheel'
     r'|Vendor|Commercial|Indemnified|Luxi|These|Several|GnuPG|WPA|Supplicant'
     r'|TagSoup|Contact|IA64|Foreign|Data|Atomic|Pentium|Note|Delay|Separa.*|Added'
     r'|Glib|Gnome|Gaim|Open|Possible|In|Read|Permissions?|New|MIT'
     r'|Agreement\.?|Immediately|Any|Custom|Reference|Each'
     r'|Education|AIRTM|Copying|Updated|Source|Code|Website'
     r'|Holder\.?'
     r')?$', 'NN'),
    # |Products\.?
    # MORE NN exceptions to NNP or CAPS
    # 'Berkeley Software Distribution',??
    (r'^(Unicode|Modified|NULL|FALSE|False|TRUE|True|Last|Predefined|If|Standard'
     r'|Versions?\.?|Package|PACKAGE|Powered|License[d\.e\:]?|License-Alias\:?|Legal'
     r'|Entity|Indemnification\.?|IS|This|Java|DoubleClick|DOM|SAX|URL|Operating'
     r'|Original|Release|IEEE|Std|BSD|POSIX|Derivative|Works|Intellij|IDEA|README'
     r'|NEWS|CHANGELOG|Change[lL]og|CHANGElogger|SIGN|F2Wku|LegalTrademarks|OriginalFilename'
     r'|PGP|Sort|Redistribution|Reserved\.?'
     r')$', 'NN'),
    # MORE NN exceptions to CAPS
    (r'^(OR|VALUE|END)$', 'NN'),
    # Various rare non CAPS but NNP, treated as full names
    (r'^(FSF[\.,]?)$', 'NAME'),
    # Windows XP
    (r'^(Windows|XP|SP1|SP2|SP3|SP4|assembly)$', 'JUNK'),
    # various junk bits
    (r'^example\.com$', 'JUNK'),
    (r'^null$', 'JUNK'),
    # when uppercase this is likely part of some SQL statement
    (r'FROM|CREATE|CURDIR', 'JUNK'),
    (r'RECURSIVE|VIEW', 'NN'),
    # found in sqlite
    (r'\+0|ToUpper', 'JUNK'),
    # Java
    (r'^.*Servlet,?|class$', 'JUNK'),
    # C/C++
    (r'^(template|struct|typedef|type|next|typename|namespace|type_of|begin|end)$', 'JUNK'),
    # Some mixed case junk
    (r'^LastModified$', 'JUNK'),
    # Some font names
    (r'^Lucida$', 'JUNK'),
    # various trailing words that are junk
    (r'^(?:CVS|EN-IE|Info|GA|unzip)$', 'JUNK'),
    # this is not Copr.
    (r'Coproduct,?', 'JUNK'),
    # Places: TODO: these are NOT NNPs but we treat them as such for now
    (r'^\(?(?:Cambridge|Stockholm|Davis|Sweden[\)\.]?|Massachusetts|Oregon|California'
     r'|Norway|UK|Berlin|CONCORD|Manchester|MASSACHUSETTS|Finland|Espoo|Munich'
     r'|Germany|Italy|Spain|Europe'
     r'|Lafayette|Indiana'
     r')[\),\.]?$', 'NNP'),
    # Date/Day/Month text references
    (r'^(Date|am|pm|AM|PM)$', 'NN'),
    (r'^(January|February|March|April|May|June|July|August|September|October|November|December)$', 'NN'),
    # Jan and Jun are common enough first names
    (r'^(Feb|Mar|Apr|May|Jul|Aug|Sep|Oct|Nov|Dec)$', 'NN'),
    (r'^(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)$', 'NN'),
    (r'^(Mon|Tue|Wed|Thu|Fri|Sat|Sun)$', 'NN'),
    (r'^\$?LastChangedDate\$?$', 'YR'),
    # Misc corner case combos ?(mixed or CAPS) that are NNP
    (r'^Software,\',|\(Royal|PARADIGM|nexB|D\.T\.Shield\.?|Antill\',$', 'NNP'),
    # Corner cases of lowercased NNPs
    (r'^(suzuki|toshiya\.?|leethomason|finney|sean|chris|ulrich'
     r'|wadim|dziedzic|okunishinishi|yiminghe|daniel|wirtz'
     r'|vonautomatisch|werkstaetten\.?|werken|various\.?)$', 'NNP'),
    # rarer caps
    # EPFL-LRC/ICA
    (r'^[A-Z]{3,6}-[A-Z]{3,6}/[A-Z]{3,6}', 'NNP'),
    # exceptions to composed proper nouns, mostly debian copyright-related
    # FIXME: may be lowercase instead?
    (r'^(Title:?|Debianized-By:?|Upstream-Maintainer:?|Content-MD5)$', 'JUNK'),
    (r'^(Upstream-Author:?|Packaged-By:?)$', 'JUNK'),
    # NOT a copyright symbol (ie. "copyrighted."): treat as NN
    (r'^[Cc](opyright(s|ed)?|OPYRIGHT(S|ED))\.$', 'NN'),
    # copyright word or symbol
    # note the leading @ .... this may be a source of problems
    (r'.?(@?([Cc]opyright)s?:?|[Cc]opr\.?|[(][Cc][)]|(COPYRIGHT)S?:?)', 'COPY'),
    # copyright in markup, until we strip markup: apache'>Copyright or left'>Copyright
    (r'[A-Za-z0-9]+[\'">]+[Cc]opyright', 'COPY'),
    # A copyright line in .Net meta files
    (r'^AssemblyCopyright$', 'COPY'),
    # AT&T (the company), needs special handling
    (r'^AT\&T[\.,]?$', 'COMP'),
    # company suffix: Tech.,ltd
    (r'^([A-Z][a-z]+[\.,]+[Ll][Tt][Dd]).?$', 'COMP'),
    # company suffix
    (r'^([Ii]nc[.]?|[I]ncorporated|[Cc]ompany|Limited|LIMITED).?$', 'COMP'),
    # company suffix
    (r'^(INC(ORPORATED|[.])?|CORP(ORATION|[.])?|FOUNDATION|GROUP|COMPANY|'
     r'[(]tm[)]).?$|[Ff]orum.?', 'COMP'),
    # company suffix
    (r'^([cC]orp(oration|[\.,])?|[cC]orporations?[\.,]?|[fF]oundation|[Aa]lliance|Working|[Gg]roup|'
     r'[Tt]echnolog(y|ies)|[Cc]ommunit(y|ies)|[Mm]icrosystems.?|[Pp]roject|'
     r'[Tt]eams?|[Tt]ech).?$', 'COMP'),
    (r"^Limited'?,?$", 'COMP'),
    # company suffix : LLC, LTD, LLP followed by one extra char
    (r'^([Ll][Ll][CcPp]|[Ll][Tt][Dd])\.?,?$', 'COMP'),
    (r'^L\.P\.?$', 'COMP'),
    (r'^[Ss]ubsidiar(y|ies)$', 'COMP'),
    (r'^[Ss]ubsidiary\(\-ies\)\.?$', 'COMP'),
    # company suffix : SA, SAS, AS, AG, AB, AS, CO, labs followed by a dot
    (r'^(S\.?A\.?S?\.?|Sas\.?|sas\.?|AS\.?|AG\.?|AB\.?|Labs?\.?|[Cc][Oo]\.?|Research|Center|INRIA|Societe).?$', 'COMP'),
    # (german) company suffix
    (r'^[Gg][Mm][Bb][Hh].?$', 'COMP'),
    # ( e.V. german) company suffix
    (r'^[eV]\.[vV]\.?$', 'COMP'),
    # (italian) company suffix
    (r'^[sS]\.[pP]\.[aA]\.?$', 'COMP'),
    # sweedish company suffix : ASA followed by a dot
    (r'^ASA.?$', 'COMP'),
    # czech company suffix: JetBrains s.r.o.
    (r'^s\.r\.o\.?$', 'COMP'),
    # (Laboratory) company suffix
    (r'^(Labs?|Laboratory|Laboratories|Laboratoire)\.?,?$', 'COMP'),
    # (dutch and belgian) company suffix
    (r'^[Bb]\.?[Vv]\.?|BVBA$', 'COMP'),
    # university
    (r'^\(?[Uu]niv(?:[.]|ersit(?:y|e|at?|ad?))\)?\.?$', 'UNI'),
    (r'^(UNIVERSITY|College)$', 'UNI'),
    # Academia/ie
    (r'^[Ac]cademi[ae]s?$', 'UNI'),
    # institutes
    (r'INSTITUTE', 'COMP'),
    (r'^[Ii]nstitut(s|o|os|e|es|et|a|at|as|u|i)?$', 'COMP'),
    # Facility
    (r'Tecnologia', 'COMP'),
    (r'Facility', 'COMP'),
    # "holders" is considered Special
    (r'^HOLDER\(S\)$', 'JUNK'),
    (r'^([Hh]olders?|HOLDERS?)$', 'HOLDER'),
    # not NNPs
    (r'^([Rr]espective|JavaScript)$', 'NN'),
    # affiliates or "and its affiliate(s)."
    (r'^[Aa]ffiliate(s|\(s\))?\.?$', 'NNP'),
    # OU as in Org unit, found in some certficates
    (r'^OU$', 'OU'),
    (r'^(CONTRIBUTORS?|OTHERS?|Contributors?\:)[,\.]?$', 'JUNK'),
    # "authors" or "contributors" is interesting, and so a tag of its own
    (r'^[Aa]uthor\.?$', 'AUTH'),
    (r'^[Aa]uthors\.?$', 'AUTHS'),
    (r'^[Aa]uthor\(s\)\.?$', 'AUTHS'),
    (r'^[Cc]ontribut(ors?|ing)\.?$', 'CONTRIBUTORS'),
    # commiters is interesting, and so a tag of its own
    (r'[Cc]ommitters\.??', 'COMMIT'),
    # same for maintainers, developers, admins.
    (r'^([Aa]dmins?|[Mm]aintainers?\.?|co-maintainers?|[Dd]evelopers?\.?)$', 'MAINT'),
    # same for developed, etc...
    (r'^(([Rr]e)?[Cc]oded|[Mm]odified|[Mm]ai?nt[ea]ine(d|r)|[Cc]reated|[Ww]ritten|[Dd]eveloped)$', 'AUTH2'),
    # author
    (r'@author', 'AUTH'),
    # of
    (r'^[Oo][Ff]$', 'OF'),
    # of
    (r'^[Dd][Eei]$', 'OF'),
    # in
    (r'^(in|en)$', 'IN'),
    # by
    (r'^by|BY|By$', 'BY'),
    # FIXMEL following is used NOWHERE
    (r'^following$', 'FOLLOW'),
    # conjunction: and
    (r'^([Aa]nd|&|[Uu]nd|ET|[Ee]t|at|and/or)$', 'CC'),
    # conjunction: or. Even though or is not conjunctive ....
    # (r'^or$', 'CC'),
    # ie. in things like "Copyright (c) 2012 John Li and others"
    # or et.al.
    (r'^[Oo]ther?s|et\.al[\.,]?$', 'OTH'),
    # in year ranges: dash, or 'to': "1990-1995", "1990/1995" or "1990 to 1995"
    (r'^([-/]|to)$', 'DASH'),
    # explicitly ignoring these words: FIXME: WHY?
    (r'^([Tt]his|THIS|[Pp]ermissions?|PERMISSIONS?|All)$', 'NN'),
    # Portions copyright .... are worth keeping
    (r'[Pp]ortions?|[Pp]arts?', 'PORTIONS'),
    # in dutch/german names, like Marco van Basten, or Klemens von Metternich
    # and Spanish/French Da Siva and De Gaulle
    (r'^(([Vv][ao]n)|[Dd][aeu])$', 'VAN'),
    # rare cases of trailing + signon years
    (r'^20[0-1][0-9]\+$', 'YR-PLUS'),
    # year or year ranges
    # plain year with various leading and trailing punct
    # dual or multi years 1994/1995. or 1994-1995
    # 1987,88,89,90,91,92,93,94,95,96,98,99,2000,2001,2002,2003,2004,2006
    # multi years
    # dual years with second part abbreviated
    # 1994/95. or 2002-04 or 1991-9
    (r'^' + _PUNCT + _YEAR_OR_YEAR_YEAR_WITH_PUNCT + '+' +
     '(' +
     _YEAR_OR_YEAR_YEAR_WITH_PUNCT +
     '|' +
     _YEAR_THEN_YEAR_SHORT +
     ')*' + '$', 'YR'),
    (r'^' + _PUNCT + _YEAR_OR_YEAR_YEAR_WITH_PUNCT + '+' +
     '(' +
     _YEAR_OR_YEAR_YEAR_WITH_PUNCT +
     '|' +
     _YEAR_THEN_YEAR_SHORT +
     '|' +
     _YEAR_SHORT_PUNCT +
     ')*' + '$', 'YR'),
    # 88, 93, 94, 95, 96: this is a pattern mostly used in FSF copyrights
    (r'^[8-9][0-9],$', 'YR'),
    # cardinal numbers
    (r'^-?[0-9]+(.[0-9]+)?.?$', 'CD'),
    # exceptions to proper nouns
    (r'^(The|Commons|[Ii]ntltool|[Tt]ext|software|Permissions?|Natural'
     r'|Docs?|Jsunittest|Asset|Packaging|Tool|Android|Win32|Do|Xalan'
     r'|Programming|Objects|Material|Improvement|Example|COPYING'
     r'|Experimental|Additional|So)$', 'NN'),
    # composed proper nouns, ie. Jean-Claude or ST-Microelectronics
    # FIXME: what about a variant with spaces around the dash?
    (r'^[A-Z][a-zA-Z]*\s?[\-]\s?[A-Z]?[a-zA-Z]+.?$', 'NNP'),
    # Countries abbreviations
    (r'^U\.S\.A\.?$', 'NNP'),
    # Dotted ALL CAPS initials
    (r'^([A-Z]\.){1,3}$', 'NNP'),
    # misc corner cases such LaTeX3 Project and other
    (r'^LaTeX3$', 'NNP'),
    (r'^Meridian\'93|Xiph.Org|iClick,?$', 'NNP'),
    # This_file_is_part_of_KDE
    (r'^[Tt]his_file_is_part_of_KDE$', 'NNP'),
    # proper nouns with digits
    (r'^([A-Z][a-z0-9]+){1,2}.?$', 'NNP'),
    # saxon genitive, ie. Philippe's
    (r"^[A-Z][a-z]+[']s$", 'NNP'),
    # Uppercase dotted name, ie. P.
    (r"^([A-Z][.]?|[A-Z]+[\.])$", 'PN'),
    # proper noun with some separator and trailing comma
    (r"^[A-Z]+[.][A-Z][a-z]+[,]?$", 'NNP'),
    # proper noun with apostrophe ': D'Orleans, D'Arcy, T'so, Ts'o
    (r"^[A-Z][[a-z]?['][A-Z]?[a-z]+[,.]?$", 'NNP'),
    # proper noun with apostrophe ': d'Itri
    (r"^[a-z]['][A-Z]?[a-z]+[,\.]?$", 'NNP'),
    # all CAPS word, at least 1 char long such as MIT, including an optional trailing comma or dot
    (r'^[A-Z0-9]+[,]?$', 'CAPS'),
    # all caps word 3 chars and more, enclosed in parens
    (r'^\([A-Z0-9]{2,}\)$', 'CAPS'),
    # proper noun: first CAP, including optional trailing comma
    # note: this also captures a bare comma as an NNP ... this is a bug
    (r'^(([A-Z][a-zA-Z0-9]+){,2},?)$', 'NNP'),
    # all CAPS word, all letters including an optional trailing single quote
    (r"^[A-Z]{2,}\'?$", 'CAPS'),
    # email eventually in parens or brackets with some trailing punct.
    (r'^[\<\(]?[a-zA-Z0-9]+[a-zA-Z0-9\+_\-\.\%]*(@|at)[a-zA-Z0-9][a-zA-Z0-9\+_\-\.\%]+\.[a-zA-Z]{2,5}?[\>\)\.\,]*$', 'EMAIL'),
    # URLS such as <(http://fedorahosted.org/lohit)>
    (r'[<\(]https?:.*[>\)]', 'URL'),
    # URLS such as ibm.com
    (r'\s?[a-z0-9A-Z\-\.\_]+\.(com|net|info|org|us|mil|io|edu|co\.[a-z][a-z]|eu|ch|biz)\s?\.?$', 'URL2'),
    # TODO: add more extensions?
    # URL wrapped in ()
    (r'[\(<]+\s?[a-z0-9A-Z\-\.\_]+\.(com|net|info|org|us|mil|io|edu|co\.[a-z][a-z]|eu|ch|biz)\s?[\.\)>]+$', 'URL'),
    (r'<?a?.(href)?.\(?[a-z0-9A-Z\-\.\_]+\.(com|net|info|org|us|mil|io|edu|co\.[a-z][a-z]|eu|ch|biz)[\.\)>]?$', 'URL'),
    # derived from regex in cluecode.finder
    (r'<?a?.(href)?.('
     r'(?:http|ftp|sftp)s?://[^\s<>\[\]"]+'
     r'|(?:www|ftp)\.[^\s<>\[\]"]+'
     r')\.?>?', 'URL'),
    (r'^\(?<?https?://[a-zA-Z0-9_\-]+(\.([a-zA-Z0-9_\-])+)+.?\)?>?$', 'URL'),
    # URLS with trailing/ such as http://fedorahosted.org/lohit/
    # URLS with leading( such as (http://qbnz.com/highlighter/
    (r'\(?https?:.*/', 'URL'),
    # K.K. (a company suffix), needs special handling
    (r'^K.K.,?$', 'NAME'),
    # comma as a conjunction
    (r'^,$', 'CC'),
    # .\" is not a noun
    (r'^\.\\\?"?$', 'JUNK'),
    # Mixed cap nouns (rare) LeGrande
    (r'^[A-Z][a-z]+[A-Z][a-z]+[\.\,]?$', 'MIXEDCAP'),
    # weird year
    (r'today.year', 'YR'),
    # communications
    (r'communications', 'NNP'),
    # Code variable names including snake case
    (r'^.*(_.*)+$', 'JUNK'),
    # nouns (default)
    (r'.+', 'NN'),
]
# Comments in the Grammar are lines that start with #
grammar = """
#######################################
# YEARS
#######################################
YR-RANGE: {<YR>+ <CC>+ <YR>} #20
YR-RANGE: {<YR> <DASH>* <YR|CD>+} #30
YR-RANGE: {<CD>? <YR>+} #40
YR-RANGE: {<YR>+ } #50
YR-AND: {<CC>? <YR>+ <CC>+ <YR>} #60
YR-RANGE: {<YR-AND>+} #70
YR-RANGE: {<YR-RANGE>+ <DASH>?} #72
#######################################
# NAMES and COMPANIES
#######################################
# two CC such as ", and" are treated as a single CC
CC: {<CC><CC>} #73
NAME: {<NAME><NNP>} #75
NAME: {<NN|NNP> <CC> <URL|URL2>} #80
# the Tor Project, Inc.
COMP: {<COMP> <COMP>+} #81
# Laboratory for Computer Science Research Computing Facility
COMPANY: {<COMP> <NN> <NNP> <NNP> <COMP> <NNP> <COMP>} #83
COMPANY: {<COMP> <NN> <NNP> <NNP> <COMP>} #82
# E. I. du Pont de Nemours and Company
COMPANY: {<NNP> <NNP> <VAN> <NNP> <OF> <NNP> <CC> <COMP>} #1010
# Robert A. van Engelen OR NetGroup, Politecnico di Torino (Italy)
NAME: {<NNP>+ <VAN|OF> <NNP>+} #88
NAME: {<NNP> <VAN|OF> <NN*> <NNP>} #90
NAME: {<NNP> <PN> <VAN> <NNP>} #100
# by the netfilter coreteam <coreteam@netfilter.org>
NAME: {<BY> <NN>+ <EMAIL>} #110
# Kaleb S. KEITHLEY
NAME: {<NNP> <PN> <CAPS>} #120
# Trolltech AS, Norway.
NAME: {<NNP> <CAPS> <NNP>} #121
# BY GEORGE J. CARRETTE
NAME: {<BY> <CAPS> <PN> <CAPS>} #85
DASHCAPS: {<DASH> <CAPS>}
# INRIA - CIRAD - INRA
COMPANY: { <COMP> <DASHCAPS>+} #1280
# Project Admins leethomason
COMPANY: { <COMP> <MAINT> <NNP>+} #1281
# the Regents of the University of California
COMPANY: {<BY>? <NN> <NNP> <OF> <NN> <UNI> <OF> <COMPANY|NAME|NAME2|NAME3><COMP>?} #130
# Free Software Foundation, Inc.
COMPANY: {<NNP> <NNP> <COMP> <COMP>} #135
# Mediatrix Telecom, inc. <ericb@mediatrix.com>
COMPANY: {<NNP>+ <COMP> <EMAIL>} #136
# Corporation/COMP for/NN National/NNP Research/COMP Initiatives/NNP
COMPANY: {<COMP> <NN> <NNP> <COMP> <NNP>} #140
# Sun Microsystems, Inc. Mountain View
COMPANY: {<COMP> <COMP> <NNP><NNP>} #144
# AT&T Laboratories, Cambridge
COMPANY: {<COMP> <COMP> <NNP>} #145
# rare "Software in the public interest, Inc."
COMPANY: {<COMP> <CD> <COMP>} #170
COMPANY: {<NNP> <IN><NN> <NNP> <NNP>+<COMP>?} #180
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
COMPANY: {<NNP> <NNP> <CC> <NNP> <COMP> <NNP> <CAPS>}
COMPANY: {<NNP> <CC> <NNP> <COMP> <NNP>?} #200
# Android Open Source Project, 3Dfx Interactive, Inc.
COMPANY: {<NN>? <NN> <NNP> <COMP>} #205
NAME: {<NNP> <NNP> <COMP> <CONTRIBUTORS> <URL|URL2>} #206
# Thai Open Source Software Center Ltd
# NNP NN NNP NNP COMP COMP')
COMPANY: {<NNP> <NN> <NNP> <NNP> <COMP>+} #207
# was: COMPANY: {<NNP|CAPS> <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <COMP> <COMP>?} #210
COMPANY: {<NNP|CAPS>+ <COMP>+} #210
COMPANY: {<UNI|NNP> <VAN|OF> <NNP>+ <UNI>?} #220
COMPANY: {<NNP>+ <UNI>} #230
COMPANY: {<UNI> <OF> <NN|NNP>} #240
COMPANY: {<COMPANY> <CC> <COMPANY>} #250
# University of Southern California, Information Sciences Institute (ISI)
COMPANY: {<COMPANY> <COMPANY> <CAPS>} #251
# GNOME i18n Project for Vietnamese
COMPANY: {<CAPS> <NN> <COMP> <NN> <NNP>} #253
COMPANY: {<CAPS> <NN> <COMP>} #255
# Project contributors
COMPANY: {<COMP> <CONTRIBUTORS>} #256
COMPANY: {<COMP>+} #260
# Nokia Corporation and/or its subsidiary(-ies)
COMPANY: {<COMPANY> <CC> <NN> <COMPANY>} #265
COMPANY: {<COMPANY> <CC> <NNP>+} #270
# AIRVENT SAM s.p.a - RIMINI(ITALY)
COMPANY: {<COMPANY> <DASH> <NNP|NN> <EMAIL>?} #290
# Typical names
#John Robert LoVerso
NAME: {<NNP> <NNP> <MIXEDCAP>} #340
# Kaleb S. KEITHLEY
NAME: {<NNP> <NNP> <CAPS>} #345
# Academy of Motion Picture Arts
NAME: {<NNP|PN>+ <NNP>+} #351
# Joe DASILVA
NAME: {<NNP> <CAPS>} #352
# <s> Gangadharan N </s>
NAME: {<NNP> <PN>+} #353
NAME: {<NNP> <NN|NNP> <EMAIL>} #390
NAME: {<NNP> <PN|VAN>? <PN|VAN>? <NNP>} #400
NAME: {<NNP> <NN> <NNP>} #410
NAME: {<NNP> <COMMIT>} #420
# the LGPL VGABios developers Team
NAME: {<NN>? <NNP> <MAINT> <COMP>} #440
# Debian Qt/KDE Maintainers
NAME: {<NNP> <NN>? <MAINT>} #460
NAME: {<NN> <NNP> <ANDCO>} #470
NAME: {<NN>? <NNP> <CC> <NAME>} #480
NAME: {<NN>? <NNP> <OF> <NN>? <NNP> <NNP>?} #490
# Academy of Motion Picture Arts and Sciences
NAME: {<NNP|PN>+ <CC>+ <NNP>+} #350again
NAME: {<NAME> <CC> <NAME>} #500
COMPANY: {<NNP> <IN> <NN>? <COMPANY>} #510
# and Josh MacDonald.
NAME: {<CC> <NNP> <MIXEDCAP>} #480
NAME: {<NAME> <UNI>} #483
# Kungliga Tekniska Hogskolan (Royal Institute of Technology, Stockholm, Sweden)
COMPANY: { <COMPANY> <OF> <COMPANY> <NAME> } #529
# Instituto Nokia de Tecnologia
COMPANY: { <COMPANY> <NNP> <OF> <COMPANY>} # 5391
# Laboratoire MASI - Institut Blaise Pascal
COMPANY: { <COMPANY> <CAPS> <DASH> <COMPANY> <NAME>} #5292
# Nara Institute of Science and Technology.
COMPANY: { <COMPANY> <OF> <NNP> <CC> <COMPANY> } #5293
NAME2: {<NAME> <EMAIL>} #530
NAME3: {<YR-RANGE> <NAME2|COMPANY>+} #535
NAME3: {<YR-RANGE> <NAME2|COMPANY>+ <CC> <YR-RANGE>} #540
NAME: {<NAME|NAME2>+ <OF> <NNP> <OF> <NN>? <COMPANY>} #550
NAME: {<NAME|NAME2>+ <CC|OF>? <NAME|NAME2|COMPANY>} #560
# FIXME HIGHLY LIKELY SCREWED LAST MOD
# strip Software from Copyright (c) Ian Darwin 1995. Software
NAME3: {<NAME>+ <YR-RANGE>} #5611
NAME3: {<YR-RANGE> <NNP>+ <CAPS>?} #5612
#Academy of Motion Picture Arts and Sciences
NAME: { <NAME> <CC> <NNP>} # 561
# Adam Weinberger and the GNOME Foundation
NAME: {<CC> <NN> <COMPANY>} # 565
# (c) 1991-1992, Thomas G. Lane , Part of the Independent JPEG Group's
NAME: {<PORTIONS> <OF> <NN> <NAME>+} #566
NAME3: {<YR-RANGE> <NAME>+ <CONTRIBUTORS>?} #570
NAME: {<NNP> <OF> <NNP>} #580
NAME: {<NAME> <NNP>} #590
NAME: {<NN|NNP|CAPS>+ <CC> <OTH>} #600
NAME: {<NNP> <CAPS>} #610
NAME: {<CAPS> <DASH>? <NNP|NAME>} #620
NAME: {<NNP> <CD> <NNP>} #630
NAME: {<COMP> <NAME>+} #640
# and other contributors
NAME: {<CC> <NN>? <CONTRIBUTORS>} #644
NAME: {<NNP|CAPS>+ <AUTHS|CONTRIBUTORS>} #660
NAME: {<VAN|OF> <NAME>} #680
NAME: {<NAME3> <COMP|COMPANY>} #690
# more names
NAME: {<NNP> <NAME>} #710
NAME: {<CC>? <IN> <NAME|NNP>} #720
NAME: {<NAME><UNI>} #730
NAME: { <NAME> <IN> <NNP> <CC|IN>+ <NNP>} #740
# by BitRouter <www.BitRouter.com>
NAME: { <BY> <NNP> <URL>} #741
# Philippe http//nexb.com joe@nexb.com
NAME: { <NNP> <URL> <EMAIL>} #742
# Companies
COMPANY: {<NAME|NAME2|NAME3|NNP>+ <OF> <NN>? <COMPANY|COMP> <NNP>?} #770
COMPANY: {<NNP> <COMP|COMPANY> <COMP|COMPANY>} #780
COMPANY: {<NN>? <COMPANY|NAME|NAME2> <CC> <COMPANY|NAME|NAME2>} #790
COMPANY: {<COMP|COMPANY|NNP> <NN> <COMPANY|COMPANY> <NNP>+} #800
# by the Institute of Electrical and Electronics Engineers, Inc.
COMPANY: {<BY> <NN> <COMPANY> <OF> <NNP> <CC> <COMPANY>}
COMPANY: {<COMPANY> <CC> <AUTH|CONTRIBUTORS|AUTHS>} #810
COMPANY: {<NN> <COMP|COMPANY>+} #820
COMPANY: {<URL|URL2>} #830
COMPANY: {<COMPANY> <COMP|COMPANY>} #840
# University Corporation for Advanced Internet Development, Inc.
COMPANY: {<UNI> <COMPANY>} #845
# The Regents of the University of California
NAME: {<NN> <NNP> <OF> <NN> <COMPANY>} #870
# Trailing Authors
COMPANY: {<NAME|NAME2|NNP>+ <CONTRIBUTORS>} #900
# Jeffrey C. Foo
COMPANY: {<PN> <COMP|COMPANY>} #910
# "And" some name
ANDCO: {<CC> <NNP> <NNP>+} #930
ANDCO: {<CC> <OTH>} #940
ANDCO: {<CC> <NN> <NAME>+} #950
# Copyright 2005-2007 <s>Christopher Montgomery</s>, <s>Jean-Marc Valin</s>, <s>Timothy Terriberry</s>, <s>CSIRO</s>, and other contributors
ANDCO: {<CC> <CAPS|COMPANY|NAME|NAME2|NAME3>+} #960
COMPANY: {<COMPANY|NAME|NAME2|NAME3> <ANDCO>+} #970
NAME: {<NNP> <ANDCO>+} #980
NAME: {<BY> <NN> <AUTH|CONTRIBUTORS|AUTHS>} #1000
# NetGroup, Politecnico di Torino (Italy)
COMPANY: {<NNP> <COMPANY> <NN|NNP>} #1030
# Arizona Board of Regents (University of Arizona)
NAME: {<COMPANY> <OF> <NN|NNP>} #1060
# The Regents of the University of California
NAME: {<NAME> <COMPANY>} #1090
# John Doe and Myriam Doe
NAME: {<NAME|NNP> <CC> <NNP|NAME>} #1120
# International Business Machines Corporation and others
COMPANY: {<COMPANY> <CC> <OTH>} #1150
COMPANY: {<NAME3> <CC> <OTH>} #1160
# Nara Institute of Science and Technology.
COMPANY: {<NNP> <COMPANY> <CC> <COMP>} #1190
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
COMPANY: {<NNP> <COMPANY> <NAME>} #1220
# (The) Android Open Source Project
COMPANY: {<NN><NN><NN>? <COMPANY>} #1250
# Bio++ Development Team
COMPANY: {<NN> <NNP> <COMPANY>} #1251
# Institut en recherche ....
COMPANY: {<NNP> <IN> <NN>+ <COMPANY>} #1310
# OU OISTE Foundation
COMPANY: {<OU> <COMPANY>} #1340
# MIT, W3C, NETLABS Temple University
COMPANY: {<CAPS>+ <COMPANY>} #1370
# XZY emails
COMPANY: {<COMPANY> <EMAIL>+} #1400
# by the a href http wtforms.simplecodes.com WTForms Team
COMPANY: {<BY> <NN>+ <COMP|COMPANY>} #1420
# the Regents of the University of California, Sun Microsystems, Inc., Scriptics Corporation
COMPANY: {<NN> <NNP> <OF> <NN> <UNI> <OF> <COMPANY>+}
# Copyright (c) 1998-2000 University College London
COMPANY: {<UNI> <UNI> <NNP>}
# "And" some name
ANDCO: {<CC>+ <NN> <NNP>+<UNI|COMP>?} #1430
ANDCO: {<CC>+ <NNP> <NNP>+<UNI|COMP>?} #1440
ANDCO: {<CC>+ <COMPANY|NAME|NAME2|NAME3>+<UNI|COMP>?} #1450
COMPANY: {<COMPANY|NAME|NAME2|NAME3> <ANDCO>+} #1460
COMPANY: {<COMPANY><COMPANY>+} #1480
# Copyright (c) 2002 World Wide Web Consortium, (Massachusetts Institute of Technology, Institut National de Recherche en Informatique et en Automatique, Keio University).
COMPANY: {<CC> <IN> <COMPANY>} #1490
# Oracle and/or its affiliates.
NAME: {<NNP> <ANDCO>} #1410
# the University of California, Berkeley and its contributors.
COMPANY: {<COMPANY> <CC> <NN> <CONTRIBUTORS>} #1411
# UC Berkeley and its contributors
NAME: {<NAME> <CC> <NN> <CONTRIBUTORS>} #1412
#copyrighted by Douglas C. Schmidt and his research group at Washington University, University of California, Irvine, and Vanderbilt University, Copyright (c) 1993-2008,
COMPANY: {<NAME> <CC> <NN> <COMPANY>+} #1413
# The University of Utah and the Regents of the University of California
COMPANY: {<NN> <COMPANY> <CC> <NN> <COMPANY>} #1414
# by the Massachusetts Institute of Technology
COMPANY: { <BY> <COMPANY> <OF> <COMPANY>} #1415
# Computer Systems and Communication Lab, Institute of Information Science, Academia Sinica.
COMPANY: { <NNP> <COMPANY> <OF> <COMPANY> <NNP>} #1416
# Copyright 2007-2010 the original author or authors.
# Copyright (c) 2007-2010 the original author or authors.
NAME: {<NN> <JUNK> <AUTH|CONTRIBUTORS|AUTHS> <NN> <AUTH|CONTRIBUTORS|AUTHS>} #1960
# Copyright (C) <s>Suresh P <suresh@ippimail.com></s>
NAME: {<NNP> <PN> <EMAIL>}
#######################################
# VARIOUS FORMS OF COPYRIGHT
#######################################
COPYRIGHT: {<COPY> <NAME> <COPY> <YR-RANGE>} #1510
COPYRIGHT: {<COPY>+ <BY>? <COMPANY|NAME*|YR-RANGE>* <BY>? <EMAIL>+} #1530
COPYRIGHT: {<COPY>+ <NAME|NAME2|NAME3> <CAPS> <YR-RANGE>} #1550
#Copyright . 2008 Mycom Pany, inc.
COPYRIGHT: {<COPY>+ <NN> <NAME3>} #1560
COPYRIGHT: {<COPY> <COPY>? <NAME|NAME2|NAME3>+ <YR-RANGE>*} #1570
COPYRIGHT: {<COPY>+ <CAPS|NNP>+ <CC> <NN> <COPY> <YR-RANGE>?} #1590
COPYRIGHT: {<COPY>+ <BY>? <COMPANY|NAME*|NAME2*>+ <YR-RANGE>*} #1610
COPYRIGHT: {<NNP>? <COPY>+ (<YR-RANGE>+ <BY>? <NN>? <COMPANY|NAME|NAME2>+ <EMAIL>?)+} #1630
COPYRIGHT: {<COPY>+ <NN> <NAME> <YR-RANGE>} #1650
COPYRIGHT: {<COPY>+ <BY> <NAME|NAME2|NAME3>+} #1670
COPYRIGHT: {<COPY> <COPY> <COMP>+} #1690
COPYRIGHT: {<COPY> <COPY> <NN>+ <COMPANY|NAME|NAME2>+} #1710
COPYRIGHT: {<COPY>+ <NN> <NN>? <COMP> <YR-RANGE>?} #1730
COPYRIGHT: {<COPY>+ <NN> <NN>? <COMP> <YR-RANGE>?} #1750
COPYRIGHT: {<COPY> <NN> <NN>? <COMPANY> <YR-RANGE>?} #1760
COPYRIGHT: {<COPY>+ <YR-RANGE|NNP> <CAPS|BY>? <NNP|YR-RANGE|NAME>+} #1780
COPYRIGHT: {<COPY> <COPY> <NNP>+} #1800
# Copyright (c) 2003+ Evgeniy Polyakov <johnpol@2ka.mxt.ru>
COPYRIGHT: {<COPY> <COPY> <YR-PLUS> <NAME|NAME2|NAME3>+} #1801
# Copyright (c) 2016 Project Admins foobar
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE>+ <COMP> <NNP> <NN>} #1830
# Copyright (c) 1995, 1996 The President and Fellows of Harvard University
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NN> <NNP> <ANDCO>} #1860
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NN> <AUTH|CONTRIBUTORS|AUTHS>} #1880
# Copyright 1999, 2000 - D.T.Shield.
# Copyright (c) 1999, 2000 - D.T.Shield.
COPYRIGHT2: {<COPY>+ <YR-RANGE> <DASH> <NN>} #1920
#(c) 2017 The Chromium Authors
COPYRIGHT2: {<COPY>+ <YR-RANGE> <NN> <NNP> <NN>} #1990
# Copyright (C) Research In Motion Limited 2010. All rights reserved.
COPYRIGHT2: {<COPYRIGHT> <COMPANY> <YR-RANGE>} #2020
# Copyright (c) 1999 Computer Systems and Communication Lab,
# Institute of Information Science, Academia Sinica.
COPYRIGHT2: {<COPYRIGHT> <COMPANY> <COMPANY>} #2060
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <BY> <NN> <NN> <NAME>} #2080
COPYRIGHT2: {<COPY> <YR-RANGE> <BY> <NN> <NN> <NAME>} #2090
COPYRIGHT2: {<COPY> <COPY><NN>? <COPY> <YR-RANGE> <BY> <NN>} #2110
# Copyright (c) 1992-2002 by P.J. Plauger.
COPYRIGHT2: {<COPY> <NN>? <COPY> <YR-RANGE> <BY> <NN> <NNP>?} #2115
COPYRIGHT2: {<COPY>+ <NN> <YR-RANGE> <BY> <NAME>} #2140
COPYRIGHT2: {<COPY>+ <YR-RANGE> <DASH> <BY>? <NAME2|NAME>} #2160
COPYRIGHT2: {<COPY>+ <YR-RANGE> <NNP> <NAME>} #2180
# Copyright (c) 2012-2016, Project contributors
COPYRIGHT2: {<COPY>+ <YR-RANGE> <COMP> <AUTHS|CONTRIBUTORS>} #2210
COPYRIGHT2: {<COPY>+ <YR-RANGE> <COMP>} #2230
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE>+ <CAPS>? <MIXEDCAP>} #2240
COPYRIGHT2: {<NAME> <COPY> <YR-RANGE>} #2260
# Copyright 2008 TJ <linux@tjworld.net>
COPYRIGHT2: {<COPY> <YR-RANGE> <CAPS> <EMAIL>} #2270
# (c) Copyright 1985-1999 SOME TECHNOLOGY SYSTEMS
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <CAPS> <CAPS> <CAPS>? <CAPS>?} #2271
# Daisy (c) 1998
NAME4: {<NNP> <COPY>} #2272
COPYRIGHT2: {<NAME4> <YR-RANGE>} #2273
# Scilab (c)INRIA-ENPC.
COPYRIGHT: {<NAME4> <NNP>} #2274
# Copyright 1994-2007 (c) RealNetworks, Inc.
COPYRIGHT: {<COPY>+ <YR-RANGE> <COPYRIGHT>} #2274
# Copyright (c) 2017 Contributors et.al.
COPYRIGHT: { <COPY> <COPY> <YR-RANGE> <CONTRIBUTORS> <OTH> } #2276
COPYRIGHT2: {<COPY>+ <NN|CAPS>? <YR-RANGE>+ <PN>*} #2280
COPYRIGHT2: {<COPY>+ <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <COMPANY>?} #2300
COPYRIGHT2: {<COPY>+ <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <DASH> <COMPANY>} #2320
COPYRIGHT2: {<NNP|NAME|COMPANY> <COPYRIGHT2>} #2340
COPYRIGHT: {<COPYRIGHT> <NN> <COMPANY>} #2360
COPYRIGHT: {<COPY>+ <BY>? <NN> <COMPANY>} #2380
COPYRIGHT: {<COMPANY> <NN> <NAME> <COPYRIGHT2>} #2400
COPYRIGHT: {<COPYRIGHT2> <COMP> <COMPANY>} #2410
COPYRIGHT: {<COPYRIGHT2> <NNP> <CC> <COMPANY>} #2430
COPYRIGHT: {<COPYRIGHT2> <NAME|NAME2|NAME3>+} #2860
# Copyright (c) 1996 Adrian Rodriguez (adrian@franklins-tower.rutgers.edu) Laboratory for Computer Science Research Computing Facility
COPYRIGHT: {<COPYRIGHT> <NAME>} #2400
# copyrights in the style of Scilab/INRIA
COPYRIGHT: {<NNP> <NN> <COPY> <NNP>} #2460
COPYRIGHT: {<NNP> <COPY> <NNP>} #2470
# Copyright or Copr. 2006 INRIA - CIRAD - INRA
COPYRIGHT: {<COPY> <NN> <COPY> <YR-RANGE>+ <COMPANY>+} #2500
COPYRIGHT: {<COPYRIGHT|COPYRIGHT2> <COMPANY>+ <NAME>*} #2580
# iClick, Inc., software copyright (c) 1999
COPYRIGHT: {<ANDCO> <NN>? <COPYRIGHT2>} #2590
# portions copyright
COPYRIGHT: {<PORTIONS> <COPYRIGHT|COPYRIGHT2>} #2610
#copyright notice (3dfx Interactive, Inc. 1999), (notice is JUNK)
COPYRIGHT: {<COPY> <JUNK> <COMPANY> <YR-RANGE>} #2620
# Copyright (C) <2013>, GENIVI Alliance, Inc.
COPYRIGHT: {<COPYRIGHT2> <ANDCO>} #2625
# copyright C 1988 by the Institute of Electrical and Electronics Engineers, Inc.
COPYRIGHT: {<COPY> <PN> <YR-RANGE> <BY> <COMPANY> } #2630
# Copyright 1996-2004, John LoVerso.
COPYRIGHT: {<COPYRIGHT> <MIXEDCAP> } #2632
# Copyright (C) 1992, 1993, 1994, 1995 Remy Card (card@masi.ibp.fr) Laboratoire MASI - Institut Blaise Pascal
COPYRIGHT: {<COPYRIGHT> <DASH> <NAME>} #2634
# Copyright 2002, 2003 University of Southern California, Information Sciences Institute
COPYRIGHT: {<COPYRIGHT> <NN> <NAME>} #2635
# Copyright 2008 TJ <linux@tjworld.net>
COPYRIGHT: {<COPYRIGHT2> <EMAIL>} #2636
COPYRIGHT: {<COPYRIGHT> <CAPS> <NAME2>} #2637
# maintainer Norbert Tretkowski <nobse@debian.org> 2005-04-16
AUTHOR: {<BY|MAINT> <NAME2> <YR-RANGE>?} #26382
# Russ Dill <Russ.Dill@asu.edu> 2001-2003
COPYRIGHT: {<NAME2> <YR-RANGE>} #2638
# (C) 2001-2009, <s>Takuo KITAME, Bart Martens, and Canonical, LTD</s>
COPYRIGHT: {<COPYRIGHT> <NNP> <COMPANY>} #26381
#Copyright Holders Kevin Vandersloot <kfv101@psu.edu> Erik Johnsson <zaphod@linux.nu>
COPYRIGHT: {<COPY> <HOLDER> <NAME>} #26383
#Copyright (c) 1995, 1996 - Blue Sky Software Corp.
COPYRIGHT: {<COPYRIGHT2> <DASH> <COMPANY>} #2639
#copyright 2000-2003 Ximian, Inc. , 2003 Gergo Erdi
COPYRIGHT: {<COPYRIGHT> <NNP> <NAME3>} #1565
#2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
COPYRIGHT: {<YR-PLUS> <COPYRIGHT>} #1566
# Copyright (c) 1992 David Giller, rafetmad@oxy.edu 1994, 1995 Eberhard Moenkeberg, emoenke@gwdg.de 1996 David van Leeuwen, david@tm.tno.nl
COPYRIGHT: {<COPYRIGHT> <EMAIL>} #2000
COPYRIGHT: {<COPYRIGHT> <NAME|NAME3>+} #2001
# copyright by M.I.T. or by MIT
COPYRIGHT: {<COPY> <BY> <NNP|CAPS>} #2002
# Copyright property of CompuServe Incorporated.
COPYRIGHT: {<COPY> <NN> <OF> <COMPANY>} #2003
# Copyright (c) 2005 DMTF.
COPYRIGHT: {<COPY> <YR-RANGE> <PN>} #2004
# Copyright (c) YEAR This_file_is_part_of_KDE
COPYRIGHT: {<COPY> <COPY> <CAPS>} #2005
# copyright by the Free Software Foundation
COPYRIGHT: {<COPY> <BY> <NN>? <NNP>? <COMPANY>} #2006
# copyright C 1988 by the Institute of Electrical and Electronics Engineers, Inc
COPYRIGHT: {<COPY> <PN>? <YR-RANGE> <BY> <NN> <NAME>} #2007
# COPYRIGHT (c) 2006 - 2009 DIONYSOS
COPYRIGHT: {<COPYRIGHT2> <CAPS>} # 2008
# Copyright (C) 2000 See Beyond Communications Corporation
COPYRIGHT2: {<COPYRIGHT2> <JUNK> <COMPANY>} # 2010
# copyright C 1988 by the Institute of Electrical and Electronics Engineers, Inc.
COPYRIGHT: {<COPY> <PN> <YR-RANGE> <COMPANY>}
COPYRIGHT2: {<NAME4> <COPYRIGHT2>} #2274
# (C) COPYRIGHT 2004 UNIVERSITY OF CHICAGO
COPYRIGHT: {<COPYRIGHT2> <UNI> <OF> <CAPS>} #2276
#Copyright or Copr. CNRS
NAME5: {<CAPS>+} #2530
#Copyright or Copr. CNRS
COPYRIGHT: {<COPY> <NN> <COPY> <COPYRIGHT|NAME5>} #2560
COPYRIGHT: {<COPYRIGHT2> <BY> <NAME5>} #2561
# Copyright (c) 2004, The Codehaus
COPYRIGHT: {<COPYRIGHT2> <NN> <NNP>} #2562
# Copyright (c) 2007-2014 IOLA and Ole Laursen.
COPYRIGHT: {<COPYRIGHT> <ANDCO>}
# Authors
# Created by XYZ
AUTH: {<AUTH2>+ <BY>} #2645
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <NN>? <COMPANY|NAME|YR-RANGE>* <BY>? <EMAIL>+} #2650
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <NN>? <COMPANY|NAME|NAME2|NAME3>+ <YR-RANGE>*} #2660
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <YR-RANGE>+ <BY>? <COMPANY|NAME|NAME2>+} #2670
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <YR-RANGE|NNP> <NNP|YR-RANGE>+} #2680
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <NN|CAPS>? <YR-RANGE>+} #2690
AUTHOR: {<COMPANY|NAME|NAME2>+ <AUTH|CONTRIBUTORS|AUTHS>+ <YR-RANGE>+} #2700
AUTHOR: {<YR-RANGE> <NAME|NAME2>+} #2710
AUTHOR: {<BY> <CC>? <NAME2>+} #2720
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <NAME2>+} #2720
AUTHOR: {<AUTHOR> <CC> <NN>? <AUTH|AUTHS>} #2730
AUTHOR: {<BY> <EMAIL>} #2740
ANDAUTH: {<CC> <AUTH|NAME|CONTRIBUTORS>+} #2750
AUTHOR: {<AUTHOR> <ANDAUTH>+} #2760
# developed by Mitsubishi and NTT.
AUTHOR: {<AUTH|AUTHS|AUTH2> <BY>? <NNP> <CC> <PN>}
# Compounded statements usings authors
# found in some rare cases with a long list of authors.
COPYRIGHT: {<COPY> <BY> <AUTHOR>+ <YR-RANGE>*} #2800
COPYRIGHT: {<AUTHOR> <COPYRIGHT2>} #2820
COPYRIGHT: {<AUTHOR> <YR-RANGE>} #2830
"""
def strip_numbers(s):
    """
    Return a string removing words made only of numbers. If there is an
    exception or s is not a string, return s as-is.
    """
    if s:
        try:
            s = u' '.join([x for x in s.split(' ') if not x.isdigit()])
        except Exception:
            # honor the documented contract: non-string or otherwise odd
            # input is returned unchanged rather than raising
            return s
    return s
def strip_some_punct(s):
    """
    Return `s` with a selection of leading and trailing punctuation
    characters removed. Falsy values are returned unchanged.
    """
    if not s:
        return s
    # common wrapping punctuation stripped from both ends
    stripped = s.strip(''','"}{-_:;&''')
    # then strip closers from the left and openers from the right only
    stripped = stripped.lstrip('.>)]')
    return stripped.rstrip('<([')
def fix_trailing_space_dot(s):
    """
    Return `s` with a trailing ' .' (space followed by a period)
    collapsed to a plain trailing '.'.
    """
    if s and s.endswith(' .'):
        return s[:-2] + '.'
    return s
def strip_unbalanced_parens(s, parens='()'):
    """
    Return `s` with every unbalanced `parens` character replaced by a
    single space. `parens` is a two-character string giving the opening
    and closing characters to balance, such as '()', '<>', '[]' or '{}'.

    Balanced pairs are left untouched; only openers with no later closer
    and closers with no earlier opener are blanked out. For example,
    '(a(b)' becomes ' a(b)' and 'a)b(' becomes 'a b '.
    """
    opener, closer = parens
    if opener not in s and closer not in s:
        return s

    # indexes of openers still waiting for a matching closer
    pending = []
    # indexes of characters proven unbalanced
    orphans = set()
    for idx, char in enumerate(s):
        if char == opener:
            pending.append(idx)
        elif char == closer:
            if pending:
                pending.pop()
            else:
                orphans.add(idx)
    # any opener left on the stack never found its closer
    orphans.update(pending)

    chars = [' ' if i in orphans else c for i, c in enumerate(s)]
    # preserve the input string type (str or unicode on Python 2)
    return type(s)('').join(chars)
def strip_all_unbalanced_parens(s):
    """
    Return `s` with unbalanced (), <>, [] and {} characters each
    replaced by a space.
    """
    result = s
    for pair in ('()', '<>', '[]', '{}'):
        result = strip_unbalanced_parens(result, pair)
    return result
def refine_copyright(c):
    """
    Refine and return a detected copyright string.
    FIXME: the grammar should not allow this to happen.
    """
    c = strip_some_punct(c)
    # this catches trailing slashes in URL for consistency
    c = c.strip('/ ')
    c = fix_trailing_space_dot(c)
    c = strip_all_unbalanced_parens(c)
    # from .net assemblies
    c = c.replace('AssemblyCopyright', 'Copyright')
    # FIXME: this should be in the grammar, but is hard to get there right
    # these are often artifacts of markup
    c = c.replace('COPYRIGHT Copyright', 'Copyright')
    c = c.replace('Copyright Copyright', 'Copyright')
    c = c.replace('Copyright copyright', 'Copyright')
    c = c.replace('copyright copyright', 'Copyright')
    c = c.replace('copyright Copyright', 'Copyright')
    c = c.replace('copyright\'Copyright', 'Copyright')
    c = c.replace('copyright"Copyright', 'Copyright')
    c = c.replace('copyright\' Copyright', 'Copyright')
    c = c.replace('copyright" Copyright', 'Copyright')
    c = c.replace('<p>', ' ')
    prefixes = set([
        'by',
    ])
    s = strip_prefixes(c, prefixes)
    s = s.split()
    # fix trailing garbage captured by the grammar; guard against a
    # statement reduced to nothing by the prefix stripping above
    if s and s[-1].lower() in ('parts', 'any', '0', '1'):
        s = s[:-1]
    # this is hard to catch otherwise, unless we split the author
    # vs copyright grammar in two. Note that AUTHOR and Authors should be kept
    if s:
        last_word = s[-1]
        if last_word.lower() == 'author' and last_word not in ('AUTHOR', 'AUTHORS', 'Authors',):
            s = s[:-1]
    return u' '.join(s)
# Lowercased lead-in words accidentally captured by the grammar that are
# stripped from the start of detected copyright and author strings.
PREFIXES = frozenset([
    '?',
    '????',
    '(insert',
    'then',
    'current',
    'year)',
    'maintained',
    'by',
    'developed',
    'written',
    'recoded',
    'coded',
    'modified',
    # NOTE: a missing comma here previously fused 'maintained' and
    # 'created' into the single bogus entry 'maintainedcreated', so
    # 'created' was never actually stripped.
    'created',
    '$year',
    'year',
    'uref',
    'owner',
    'from',
    'and',
    'of',
    'to',
    'for',
    'or',
    '<p>',
])
def _refine_names(s, prefixes=PREFIXES):
    """
    Refine and return a detected name string (holder or author),
    stripping punctuation, number-only words, unbalanced parens and any
    leading `prefixes` words.
    FIXME: the grammar should not allow this to happen.
    """
    cleaned = strip_some_punct(s)
    cleaned = strip_numbers(cleaned)
    cleaned = strip_all_unbalanced_parens(cleaned)
    # strip punctuation again: blanking parens may expose new edges
    cleaned = strip_some_punct(cleaned)
    return strip_prefixes(cleaned, prefixes)
# Lowercased strings that are never real copyright holders: a detected
# holder equal to one of these is discarded by refine_holder.
JUNK_HOLDERS = frozenset([
    'property',
    'licensing@',
    'c',
    'works',
    'http',
    'the',
    'are',
    '?',
    'cppyright',
    'parts',
    'disclaimed',
    'or',
])
# Prefixes stripped from detected holders: the generic PREFIXES plus a
# few holder-specific lead-in words.
HOLDERS_PREFIXES = PREFIXES | frozenset([
    'ou',
    'portions',
    'portion',
    'notice',
    'holders',
    'holder',
    'property',
    'parts',
    'part',
    'at',
    'cppyright',
    'assemblycopyright',
    'c',
    'works',
    'present',
])
# Trailing words stripped from detected holders.
HOLDERS_SUFFIXES = frozenset((
    'http',
    'and',
    'email',
    'licensing@',
    '(minizip)',
    'website',
))
def refine_holder(s, prefixes=HOLDERS_PREFIXES, junk_holders=JUNK_HOLDERS,
                  suffixes=HOLDERS_SUFFIXES):
    """
    Refine and return a detected holder string, or None (implicitly)
    when the refined result is empty or a known junk value.
    FIXME: the grammar should not allow this to happen.
    """
    holder = _refine_names(s, prefixes)
    holder = strip_suffixes(holder, suffixes)
    holder = holder.strip()
    if holder and holder.lower() not in junk_holders:
        return holder
# Lowercased strings that are never real authors, such as the
# placeholder names used in GNU license boilerplate.
JUNK_AUTHORS = frozenset((
    # in GNU licenses
    'james hacker.',
    'james random hacker.',
))
# Prefixes stripped from detected authors: the generic PREFIXES plus
# author-specific lead-in words.
AUTHORS_PREFIXES = PREFIXES | frozenset([
    'contributor', 'contributors', 'contributor(s)',
    'author', 'authors', 'author(s)', 'authored', 'created'
])
def refine_author(s, prefixes=AUTHORS_PREFIXES, junk_authors=JUNK_AUTHORS):
    """
    Refine and return a detected author string, or None (implicitly)
    when the refined result is empty or a known junk value.
    FIXME: the grammar should not allow this to happen.
    """
    # FIXME: we could consider to split comma separated lists such as
    # gthomas, sorin@netappi.com, andrew.lunn@ascom.che.g.
    author = _refine_names(s, prefixes).strip()
    if author and author.lower() not in junk_authors:
        return author
def strip_prefixes(s, prefixes=()):
    """
    Return `s` with any leading words found (lowercased) in the
    `prefixes` collection removed. Spacing is normalized to single
    spaces in the process.
    """
    # NOTE: prefixes are hard to catch otherwise, unless we split the
    # author vs copyright grammar in two
    words = s.split()
    while words and words[0].lower() in prefixes:
        del words[0]
    return u' '.join(words)
def strip_suffixes(s, suffixes=()):
    """
    Return `s` with any trailing words found (lowercased) in the
    `suffixes` collection removed. Spacing is normalized to single
    spaces in the process.
    """
    words = s.split()
    while words and words[-1].lower() in suffixes:
        words.pop()
    return u' '.join(words)
def refine_date(c):
    """
    Refine and return a detected date or date range string.
    FIXME: the grammar should not allow this to happen.
    """
    refined = strip_some_punct(c)
    return refined
# Set of statements that get detected and are junk/false positive
# note: this must be lowercase and be kept to a minimum.
# A junk copyright cannot be resolved otherwise by parsing with a grammar.
# It would be best not to have to resort to this, but this is practical.
# Entries are compared against the lowercased output of refine_copyright.
# NOTE: when adding new entries, keep the trailing comma on the previous
# line: a missing comma silently concatenates adjacent string literals.
JUNK_COPYRIGHTS = frozenset([
    '(c)',
    'full copyright statement',
    'copyrighted by their authors',
    'copyrighted by their authors.',
    'copyright holder or other authorized',
    'copyright holder who authorizes',
    'copyright holder has authorized',
    'copyright holder nor the author',
    'copyright holder(s) or the author(s)',
    'copyright holders and contributors',
    'copyright owner or entity authorized',
    'copyright owner or contributors',
    'copyright and license, contributing',
    'copyright for a new language file should be exclusivly the authors',
    'copyright (c) year',
    'copyright (c) year your name',
    'copyright holder or said author',
    'copyright holder, or any author',
    'copyright holder and contributor',
    'copyright-holder and its contributors',
    'copyright holders and contributors.',
    'copyrighted material, only this license, or another one contracted with the authors',
    'copyright notices, authorship',
    'copyright holder means the original author(s)',
    "copyright notice. timevar.def's author",
    'copyright copyright and',
    "copyright holder or simply that it is author-maintained'.",
    "copyright holder or simply that is author-maintained'.",
    '(c) if you bring a patent claim against any contributor',
    'copyright-check writable-files m4-check author_mark_check',
    "copyright of uc berkeley's berkeley software distribution",
    '(c) any recipient',
    '(c) each recipient',
    'copyright in section',
    'u.s. copyright act',
    # from a WROX license text
    'copyright john wiley & sons, inc. year',
    'copyright holders and contributing',
    '(c) individual use.',
    'copyright, license, and disclaimer',
    '(c) forums',
    # from the rare LATEX licenses
    'copyright 2005 m. y. name',
    'copyright 2003 m. y. name',
    'copyright 2001 m. y. name',
    'copyright. united states',
    '(c) source code',
    'copyright, designs and patents',
    '(c) software activation.',
    '(c) cockroach enterprise edition',
    'attn copyright agent',
    'code copyright grant',
    # seen in a weird Adobe license
    'copyright redistributions',
    'copyright neither',
    'copyright including, but not limited',
    'copyright not limited',
    # found in an RPM spec file COPYRIGHT: LGPL\nGROUP: ....
    'copyright lgpl group',
    'copyright gpl group',
    # from strace-4.6/debian/changelog:
    # * Add location of upstream sources to the copyright
    # * Merged ARM architecture support from Jim Studt <jim@federated.com>
    'copyright merged arm',
    # common in sqlite
    '(c) as',
    # from libmng - libmng.spec
    # Copyright: AS IS
    # Group: System Environment/Libraries
    'copyright as is group system'
])
# simple tokenization: split on runs of tabs, spaces and the '=' and ';'
# punctuation characters
splitter = re.compile(r'[\t =;]+').split
class CopyrightDetector(object):
    """
    Class to detect copyrights and authorship.

    Uses an NLTK RegexpTagger built from the module-level `patterns` token
    regexes and a RegexpParser built from the module-level `grammar`.
    """

    def __init__(self):
        # nltk is imported lazily so importing this module stays cheap
        from nltk import RegexpTagger
        from nltk import RegexpParser
        self.tagger = RegexpTagger(patterns)
        self.chunker = RegexpParser(grammar, trace=TRACE_DEEP)

    @classmethod
    def as_str(cls, node, ignores=frozenset()):
        """
        Return a parse tree node as a space-normalized string.
        Optionally filters node labels provided in the ignores set.
        """
        # each leaf is a (token text, label) pair produced by the tagger
        if ignores:
            leaves = (leaf_text for leaf_text, leaf_label in node.leaves()
                      if leaf_label not in ignores)
        else:
            leaves = (leaf_text for leaf_text, leaf_label in node.leaves())
        node_string = ' '.join(leaves)
        return u' '.join(node_string.split())

    def detect(self, numbered_lines,
               copyrights=True, holders=True, authors=True, include_years=True,
               _junk=JUNK_COPYRIGHTS):
        """
        Yield tuples of (detection type, detected value, start_line, end_line)
        where the type is one of copyrights, authors, holders. Use an iterable
        of `numbered_lines` tuples of (line number, line text).
        If `include_years` is False, the copyright statements do not have years
        or year range information.
        """
        from nltk.tree import Tree
        numbered_lines = list(numbered_lines)
        # all detections from this group of lines share the same line span
        start_line = numbered_lines[0][0]
        end_line = numbered_lines[-1][0]
        tokens = self.get_tokens(numbered_lines)
        if not tokens:
            return
        # first, POS tag each token using token regexes
        tagged_text = self.tagger.tag(tokens)
        if TRACE: logger_debug('CopyrightDetector:tagged_text: ' + str(tagged_text))
        # then build a parse tree based on tagged tokens
        tree = self.chunker.parse(tagged_text)
        if TRACE: logger_debug('CopyrightDetector:parse tree: ' + str(tree))
        # bind once: avoids repeated attribute lookups in the tree walk
        CopyrightDetector_as_str = CopyrightDetector.as_str
        if include_years:
            year_labels = ()
        else:
            # labels dropped from the statement text when years are excluded
            year_labels = frozenset(['YR-RANGE', 'YR', 'YR-AND', 'YR-PLUS', ])
        # labels dropped when reducing a copyright statement to its holder
        non_holder_labels = frozenset([
            'COPY',
            'YR-RANGE', 'YR-AND', 'YR', 'YR-PLUS',
            'EMAIL', 'URL',
            'HOLDER', 'AUTHOR',
        ])
        # then walk the parse tree, collecting copyrights, years and authors
        for tree_node in tree:
            # plain (token, label) leaves at the top level are not detections
            if not isinstance(tree_node, Tree):
                continue
            node_text = CopyrightDetector_as_str(tree_node, ignores=year_labels)
            tree_node_label = tree_node.label()
            if 'COPYRIGHT' in tree_node_label:
                if TRACE: logger_debug('CopyrightDetector:Copyright tree node: ' + str(tree_node))
                if node_text and node_text.strip():
                    refined = refine_copyright(node_text)
                    # checking for junk is a last resort
                    if refined.lower() not in _junk:
                        if copyrights:
                            if TRACE: logger_debug('CopyrightDetector: detected copyrights:', refined, start_line, end_line)
                            yield 'copyrights', refined, start_line, end_line
                        if holders:
                            # the holder is the statement minus sign/year/contact labels
                            holder = CopyrightDetector_as_str(tree_node, ignores=non_holder_labels)
                            refined_holder = refine_holder(holder)
                            if refined_holder and refined_holder.strip():
                                yield 'holders', refined_holder, start_line, end_line
                                if TRACE: logger_debug('CopyrightDetector: detected holders:', refined_holder, start_line, end_line)
            elif authors and tree_node_label == 'AUTHOR':
                refined_auth = refine_author(node_text)
                if refined_auth:
                    if TRACE: logger_debug('CopyrightDetector: detected authors:', refined_auth, start_line, end_line)
                    yield 'authors', refined_auth, start_line, end_line

    def get_tokens(self, numbered_lines):
        """
        Return an iterable of tokens from lines of text.
        """
        tokens = []
        tokens_append = tokens.append
        for _line_number, line in numbered_lines:
            line = prepare_text_line(line)
            for tok in splitter(line):
                # strip trailing single quotes and ignore empties
                tok = tok.strip("' ")
                # strip trailing colons: why?
                tok = tok.rstrip(':').strip()
                # strip leading @: : why?
                tok = tok.lstrip('@').strip()
                if tok and tok not in (':',):
                    tokens_append(tok)
        if TRACE: logger_debug('CopyrightDetector:tokens: ' + repr(tokens))
        return tokens
# reduce an (already lowercased) line to alphanumerics only; used by the
# quick candidate-line checks below
remove_non_chars = re.compile(r'[^a-z0-9]').sub
def prep_line(line):
    """
    Return a tuple of (prepared line, lowercase chars-only line) from a
    line of text, prepared for candidate and other checks.
    """
    prepared = prepare_text_line(line.lower())
    chars_only = remove_non_chars('', prepared)
    return prepared, chars_only.strip()
def is_candidate(prepped_line):
    """
    Return True if a prepped line is a candidate line for copyright
    detection: it contains a year or one of the known statement markers.
    Return False otherwise (the original fell through returning None;
    both are falsy to callers).
    """
    if not prepped_line:
        return False
    if copyrights_hint.years(prepped_line):
        # if TRACE: logger_debug('is_candidate: year in line:\n%(line)r' % locals())
        return True
    for marker in copyrights_hint.statement_markers:
        if marker in prepped_line:
            # if TRACE: logger_debug('is_candidate: %(marker)r in line:\n%(line)r' % locals())
            return True
    return False
def is_inside_statement(chars_only_line):
    """
    Return True if a chars-only line ends with strings indicating that
    we are still inside a copyright statement.
    """
    endings = ('copyright', 'copyrights', 'copyrightby',) + copyrights_hint.all_years
    return chars_only_line and chars_only_line.endswith(endings)
def is_end_of_statement(chars_only_line):
    """
    Return True if a chars-only line ends with strings indicating that
    we reached the end of a copyright statement (i.e. some "all rights
    reserved" variant, reduced to alphanumerics).
    """
    endings = ('rightreserved', 'rightsreserved')
    return chars_only_line and chars_only_line.endswith(endings)
def candidate_lines(numbered_lines):
    """
    Yield lists of candidate lines where each list element is a tuple of
    (line number, line text) given an iterable of numbered_lines as tuples of
    (line number, line text) .
    A candidate line is a line of text that may contain copyright statements.
    A few lines before and after a candidate line are also included.
    """
    candidates = deque()
    candidates_append = candidates.append
    candidates_clear = candidates.clear
    # used as a state and line counter: 0 means "not in a statement";
    # a positive value is the number of follow-up lines still accepted
    in_copyright = 0
    # the previous line (chars only)
    previous_chars = None
    for numbered_line in numbered_lines:
        if TRACE: logger_debug('# candidate_lines: evaluating line:' + repr(numbered_line))
        line_number, line = numbered_line
        # FIXME: we should get the prepared text from here and return effectively pre-preped lines
        prepped, chars_only = prep_line(line)
        if is_end_of_statement(chars_only):
            # "all rights reserved"-style ending: close and yield the group
            candidates_append(numbered_line)
            if TRACE:
                cands = list(candidates)
                logger_debug(' candidate_lines: is EOS: yielding candidates\n %(cands)r\n\n' % locals())
            yield list(candidates)
            candidates_clear()
            in_copyright = 0
            previous_chars = None
            continue
        elif is_candidate(prepped):
            # the state is now "in copyright"
            in_copyright = 2
            candidates_append(numbered_line)
            previous_chars = chars_only
            if TRACE: logger_debug(' candidate_lines: line is candidate')
        elif 's>' in line:
            # this is for debian-style <s></s> copyright name tags
            # the state is now "in copyright"
            in_copyright = 2
            candidates_append(numbered_line)
            previous_chars = chars_only
            if TRACE: logger_debug(' candidate_lines: line is <s></s>candidate')
        elif in_copyright > 0:
            # still within the trailing-context window of a candidate
            if ((not chars_only)
            and (not previous_chars.endswith(('copyright', 'copyrights', 'copyrightsby', 'copyrightby',)))):
                # completely empty or only made of punctuations
                if TRACE:
                    cands = list(candidates)
                    logger_debug(' candidate_lines: empty: yielding candidates\n %(cands)r\n\n' % locals())
                yield list(candidates)
                candidates_clear()
                in_copyright = 0
                previous_chars = None
            else:
                candidates_append(numbered_line)
                # and decrement our state
                in_copyright -= 1
                if TRACE: logger_debug(' candidate_lines: line is in copyright')
        elif candidates:
            # non-candidate line ends the current group
            if TRACE:
                cands = list(candidates)
                logger_debug(' candidate_lines: not in COP: yielding candidates\n %(cands)r\n\n' % locals())
            yield list(candidates)
            candidates_clear()
            in_copyright = 0
            previous_chars = None
    # finally: flush any group still open at end of input
    if candidates:
        if TRACE:
            cands = list(candidates)
            logger_debug('candidate_lines: finally yielding candidates\n %(cands)r\n\n' % locals())
        yield list(candidates)
# this catches tags but does not remove the text inside tags:
# an opening "<", an optional comment/PI/closing-tag lead-in character,
# a tag-name-like run (note: deliberately excludes a lone 'h' or 'w'
# start so URLs like <http...> survive — TODO confirm intent), then
# attribute-like characters up to an optional "/" and the closing ">"
remove_tags = re.compile(
    r'<'
    r'[(--)\?\!\%\/]?'
    r'[a-gi-vx-zA-GI-VX-Z][a-zA-Z#\"\=\s\.\;\:\%\&?!,\+\*\-_\/]*'
    r'[a-zA-Z0-9#\"\=\s\.\;\:\%\&?!,\+\*\-_\/]+'
    r'\/?>',
    re.MULTILINE | re.UNICODE
).sub
def strip_markup(text):
    """
    Strip markup tags from text: generic tags are replaced by a space
    while Debian copyright-file <s> name tags are removed outright.
    """
    text = remove_tags(' ', text)
    # Debian copyright file markup
    for tag in ('</s>', '<s>', '<s/>'):
        text = text.replace(tag, '')
    return text
# this catches the common C-style percent string formatting codes
# (a space-delimited %x or #x code)
remove_printf_format_codes = re.compile(r' [\#\%][a-zA-Z] ').sub
# runs of punctuation that never carry copyright meaning
remove_punctuation = re.compile(r'[\*#"%\[\]\{\}`]+').sub
# ASCII "line decorations" such as ----, ====, !!!! or /////
remove_ascii_decorations = re.compile(r'[-_=!\\*]{2,}|/{3,}').sub
# collapse runs of two or more single quotes into one
fold_consecutive_quotes = re.compile(r"\'{2,}").sub
# less common rem comment line prefix in dos
# less common dnl comment line prefix in autotools am/in
remove_comment_markers = re.compile(r'^(rem|\@rem|dnl)\s+').sub
# common comment line prefix in man pages
remove_man_comment_markers = re.compile(r'.\\"').sub
def prepare_text_line(line):
    """
    Prepare a unicode `line` of text for copyright detection: strip
    comment markers and markup, normalize copyright signs, entities and
    spacing, and transliterate to ASCII. Return the cleaned line.
    """
    # remove some junk in man pages: \(co
    line = line.replace(r'\\ co', ' ')
    line = line.replace(r'\ co', ' ')
    line = line.replace(r'(co ', ' ')
    line = remove_printf_format_codes(' ', line)
    # un common comment line prefixes
    line = remove_comment_markers(' ', line)
    line = remove_man_comment_markers(' ', line)
    # C and C++ style markers
    line = line.replace('^//', ' ')
    line = line.replace('/*', ' ').replace('*/', ' ')
    # un common pipe chars in some ascii art
    line = line.replace('|', ' ')
    # normalize copyright signs and spacing around them
    line = line.replace('"Copyright', '" Copyright')
    line = line.replace('( C)', ' (c) ')
    line = line.replace('(C)', ' (c) ')
    line = line.replace('(c)', ' (c) ')
    # the case of \251 is tested by 'weirdencoding.h'
    line = line.replace(u'©', u' (c) ')
    line = line.replace(u'\251', u' (c) ')
    # NOTE(review): the next four replaces look identical here but are
    # presumably distinct mojibake byte sequences of the (c) sign in the
    # original file — confirm against the upstream encoding before editing
    line = line.replace('©', ' (c) ')
    line = line.replace('©', ' (c) ')
    line = line.replace('©', ' (c) ')
    line = line.replace('©', ' (c) ')
    line = line.replace(u'\xa9', ' (c) ')
    # NOTE(review): '\X' is not a recognized escape, so u'\XA9' is the
    # literal 4 characters backslash-X-A-9 — likely intended as \xa9
    line = line.replace(u'\XA9', ' (c) ')
    # FIXME: what is \xc2???
    line = line.replace(u'\xc2', '')
    # not really a dash
    # # MIT
    line = line.replace(u'–', '-')
    # TODO: add more HTML entities replacements
    # see http://www.htmlhelp.com/reference/html40/entities/special.html
    # convert html entities &#13;&#10; CR LF to space
    line = line.replace(u'&#13;&#10;', ' ')
    line = line.replace(u'&#13;', ' ')
    line = line.replace(u'&#10;', ' ')
    # spaces
    line = line.replace(u'&ensp;', ' ')
    line = line.replace(u'&emsp;', ' ')
    line = line.replace(u'&thinsp;', ' ')
    # common named entities
    line = line.replace(u'&quot;', '"').replace(u'&quot', '"')
    line = line.replace(u'&amp;', '&').replace(u'&amp', '&')
    line = line.replace(u'&gt;', '>').replace(u'&gt', '>')
    line = line.replace(u'&lt;', '<').replace(u'&lt', '<')
    # normalize (possibly repeated) quotes to unique single quote '
    # backticks ` and "
    line = line.replace(u'`', "'")
    line = line.replace(u'"', "'")
    # keep only one quote
    line = fold_consecutive_quotes("'", line)
    # treat some escaped literal CR, LF, tabs, \00 as new lines
    # such as in code literals: a="\\n some text"
    line = line.replace('\\t', ' ')
    line = line.replace('\\n', ' ')
    line = line.replace('\\r', ' ')
    line = line.replace('\\0', ' ')
    # TODO: why backslashes?
    line = line.replace('\\', ' ')
    # replace ('
    line = line.replace(r'("', ' ')
    # some trailing garbage ')
    line = line.replace("')", ' ')
    line = line.replace("],", ' ')
    # note that we do not replace the debian tag by a space: we remove it
    line = strip_markup(line)
    line = remove_punctuation(' ', line)
    # normalize spaces around commas
    line = line.replace(' , ', ', ')
    # remove ASCII "line decorations"
    # such as in --- or === or !!! or *****
    line = remove_ascii_decorations(' ', line)
    # in apache'>Copyright replace ">" by "> "
    line = line.replace('>', '> ')
    line = line.replace('<', ' <')
    # normalize to ascii text
    line = toascii(line, translit=True)
    # normalize to use only LF as line endings so we can split correctly
    # and keep line endings
    line = unixlinesep(line)
    # strip verbatim back slash and comment signs again at both ends of a line
    # FIXME: this is done at the start of this function already
    line = line.strip('\\/*#%;')
    # normalize spaces
    line = ' '.join(line.split())
    return line
| 34.289409
| 175
| 0.583014
|
acfd8fc793c4c17a9556a150ad348425fd43413b
| 14,987
|
py
|
Python
|
global_resources/forms.py
|
Stephen-X/grumblr-microblogging
|
a01a22d31d9d4d37bce8acd092880ef2ed3f1e1e
|
[
"MIT"
] | 1
|
2020-02-19T23:58:41.000Z
|
2020-02-19T23:58:41.000Z
|
global_resources/forms.py
|
Stephen-X/grumblr-microblogging
|
a01a22d31d9d4d37bce8acd092880ef2ed3f1e1e
|
[
"MIT"
] | 7
|
2020-02-11T21:44:51.000Z
|
2021-09-07T23:36:39.000Z
|
global_resources/forms.py
|
Stephen-X/grumblr-microblogging
|
a01a22d31d9d4d37bce8acd092880ef2ed3f1e1e
|
[
"MIT"
] | null | null | null |
"""
Forms used by the site for validating user input.
Author: Stephen Xie <[redacted]@cmu.edu>
Version: 1.2.0
"""
from django import forms
from django.contrib.auth.models import User
from .models import Message, UserExtended, Comment
class UserLoginForm(forms.Form):
    """
    Form for validating user login information.
    """
    username = forms.CharField(max_length=30,
                               widget=forms.TextInput(
                                   # customize html attribute of the
                                   # generated <input> tag from the widget
                                   # instance instantiation
                                   attrs={
                                       # name of the input field will be displayed
                                       # as placeholder
                                       'placeholder': 'Username',
                                       # this element automatically gets focus when the page loads.
                                       # Note-to-self: we can't use HTML5's attribute minimization (i.e.
                                       # just an 'autofocus' boolean attribute) because attrs is a dictionary.
                                       'autofocus': 'autofocus',
                                       # must add the 'autofocus_field' id to all autofocus fields;
                                       # it will be used during error modal toggling (js/toggle-error-modal.js).
                                       'id': 'autofocus_field',
                                       # inform browser that automatic completion feature can be enabled
                                       # for this field
                                       'autocomplete': 'username'
                                   }
                               ))
    password = forms.CharField(max_length=200,
                               label='password',
                               widget=forms.PasswordInput(
                                   attrs={
                                       'placeholder': 'Password',
                                       'autocomplete': 'current-password'
                                   }
                               ))
    # the PasswordInput widget corresponds to the HTML form
    # widget <input type="password">

    # override form validation for the username field
    def clean_username(self):
        username = self.cleaned_data.get('username')  # get the normalized username data
        # NOTE(review): revealing whether a username exists enables account
        # enumeration; consider a generic "invalid credentials" error raised
        # after authentication instead — confirm with the team.
        if not User.objects.filter(username__exact=username):
            raise forms.ValidationError('Cannot find this username in our record.')
        # generally return the cleaned data we got from the cleaned_data dictionary
        return username
class UserPasswordForm(forms.Form):
    """
    Form for validating user password input (a password plus its
    confirmation); also the base class of UserRegisterForm.
    """
    password = forms.CharField(max_length=100,
                               label='password',
                               widget=forms.PasswordInput(
                                   attrs={
                                       'placeholder': 'Password',
                                       'autocomplete': 'new-password',
                                       'autofocus': 'autofocus',
                                       'id': 'autofocus_field',
                                       'class': 'form-control'
                                   }
                               ))
    # the PasswordInput widget corresponds to the HTML form
    # widget <input type="password">
    password_confirm = forms.CharField(max_length=100,
                                       label='password',
                                       widget=forms.PasswordInput(
                                           attrs={
                                               'id': 'password-confirm',
                                               'placeholder': 'Confirm password',
                                               'class': 'form-control'
                                           }
                                       ))

    # override the forms.Form.clean function; this customizes
    # form validations so that user input data conform to
    # a specified format
    def clean(self):
        # call parent(forms.Form)'s clean function, and get a
        # dictionary of cleaned data
        cleaned_data = super(UserPasswordForm, self).clean()
        password = cleaned_data.get('password')
        password_confirm = cleaned_data.get('password_confirm')
        # confirm that the two passwords do match
        # Note: don't need to check beforehand the two password fields are
        # not None, because by default, each Field class assumes the value
        # is required, so if you pass an empty value - either None or the
        # empty string ("") - then clean() will raise a ValidationError
        # exception, therefore the two fields are guaranteed to contain
        # non-empty strings.
        if password != password_confirm:
            raise forms.ValidationError("Passwords did not match.")
        # generally return the cleaned data we got from parent
        return cleaned_data
class UserRegisterForm(UserPasswordForm):
    """
    Form for validating user registration information; inherited from
    UserPasswordForm (which contributes the password pair validation).
    """
    first_name = forms.CharField(max_length=30,
                                 widget=forms.TextInput(
                                     attrs={
                                         'placeholder': 'First name',
                                         'autofocus': 'autofocus',
                                         'id': 'autofocus_field',
                                     }
                                 ))
    last_name = forms.CharField(max_length=30,
                                widget=forms.TextInput(
                                    attrs={
                                        'placeholder': 'Last name'
                                    }
                                ))
    email = forms.EmailField(max_length=100,
                             widget=forms.EmailInput(
                                 attrs={
                                     'placeholder': 'Email'
                                 }
                             ))
    username = forms.CharField(max_length=30,
                               widget=forms.TextInput(
                                   attrs={
                                       'placeholder': 'Username',
                                       # inform browser that automatic completion feature can be enabled
                                       # for this field
                                       'autocomplete': 'username'
                                   }
                               ))
    password = forms.CharField(max_length=100,
                               label='password',
                               widget=forms.PasswordInput(
                                   attrs={
                                       'placeholder': 'Password',
                                       'autocomplete': 'new-password',
                                   }
                               ))
    password_confirm = forms.CharField(max_length=100,
                                       label='password',
                                       widget=forms.PasswordInput(
                                           attrs={
                                               'placeholder': 'Confirm password'
                                           }
                                       ))

    # Display order of the fields when this form renders itself: the
    # parent class contributes the password fields first, so without this
    # the passwords would precede the name/email fields.
    # Note: the previous implementation assigned self.fields.keyOrder in
    # __init__, which is the pre-Django-1.7 API — on Django 1.9+ the
    # supported mechanism is the field_order class attribute, consumed by
    # BaseForm.order_fields() during __init__.
    field_order = ['first_name', 'last_name', 'email', 'username', 'password', 'password_confirm']

    # override form validation for the username field
    def clean_username(self):
        # confirms that the username is not already present in the
        # User model database
        username = self.cleaned_data.get('username')  # get the normalized username data
        if User.objects.filter(username__exact=username):
            raise forms.ValidationError("Username is already taken.")
        # generally return the cleaned data we got from the cleaned_data dictionary
        return username
class MessageForm(forms.ModelForm):
    """
    Model form for validating posting messages; created from the Message model.
    """
    class Meta:
        model = Message
        # only the user-editable field is exposed here
        fields = ['message']
        # Note: you should only include properties that will be modified by the USER;
        # if there're any other attributes you need to modify in program, don't include
        # them here, but instead use the save(commit=False) method and add those attributes
        # later, as demonstrated here:
        # https://docs.djangoproject.com/en/1.11/topics/forms/modelforms/#selecting-the-fields-to-use
        widgets = {
            'message': forms.TextInput(
                attrs={
                    'placeholder': 'New Message',
                    'autofocus': 'autofocus',
                    'id': 'autofocus_field',
                    'class': 'text-box'
                }
            ),
            # 'photo': forms.FileInput()
        }
class CommentForm(forms.ModelForm):
    """
    Model form for validating posted comments; created from the Comment model.
    """
    class Meta:
        # Only the comment text is user-editable; author/article associations
        # are presumably set in the view — TODO confirm against the caller.
        model = Comment
        fields = ['content']
        widgets = {
            'content': forms.TextInput(
                attrs={
                    'placeholder': 'New Comment'
                }
            ),
            # 'photo': forms.FileInput()
        }
class UserInfoForm(forms.ModelForm):
    """
    Model form for validating content for editing information stored in User.
    All fields are optional so a user may update any subset of them.
    """
    def __init__(self, *args, **kwargs):
        super(UserInfoForm, self).__init__(*args, **kwargs)
        # all fields are optional
        for field in self.fields.values():
            field.required = False
        self.fields['first_name'].help_text = '30 characters max.'
    # Note: (due to a bug in Django?) all widget attribute settings will not take effect
    # if you try to modify fields using the method below; modify them in __init__() instead.
    # first_name = forms.CharField(required=False, help_text='30 characters max.')
    # last_name = forms.CharField(required=False)
    # email = forms.EmailField(required=False)
    class Meta:
        model = User
        fields = ['first_name', 'last_name', 'email']
        # Widget overrides supply Bootstrap classes and placeholders for rendering.
        widgets = {
            'first_name': forms.TextInput(
                attrs={
                    'placeholder': 'First name',
                    'autofocus': 'autofocus',
                    'class': 'form-control',
                    'id': 'autofocus_field',
                }
            ),
            'last_name': forms.TextInput(
                attrs={
                    'placeholder': 'Last name',
                    'class': 'form-control',
                    'id': 'last_name'
                }
            ),
            'email': forms.EmailInput(
                attrs={
                    'placeholder': 'Your email',
                    'class': 'form-control',
                    'id': 'email'
                }
            )
        }
class UserExtInfoForm(forms.ModelForm):
    """
    Model form for validating content for editing information stored in
    UserExtended.  All fields are optional; model-level defaults are cleared
    in __init__ so an untouched field is submitted as empty rather than as
    the model's default value.
    """
    def __init__(self, *args, **kwargs):
        # delete default values inherited from models
        # Ref: https://stackoverflow.com/a/2988630
        initial = kwargs.get('initial', {})
        initial['avatar'] = None
        initial['signature'] = None
        initial['gender'] = None
        initial['age'] = None
        initial['hometown'] = None
        initial['hobby'] = None
        initial['bio'] = None
        kwargs['initial'] = initial
        super(UserExtInfoForm, self).__init__(*args, **kwargs)
        # all fields are optional
        for field in self.fields.values():
            field.required = False
    # Note: (due to a bug in Django?) all widget attribute settings will not take effect
    # if you try to modify fields using the method below; modify them in __init__() instead.
    # avatar = forms.ImageField(required=False)
    # signature = forms.CharField(required=False)
    # gender = forms.ChoiceField(required=False)
    # age = forms.IntegerField(required=False)
    # hometown = forms.CharField(required=False)
    # hobby = forms.CharField(required=False)
    # bio = forms.CharField(required=False)
    # an additional hidden field signaling backend view of user info modification
    user_info_mod = forms.BooleanField(widget=forms.HiddenInput(), initial=True)
    class Meta:
        model = UserExtended
        # fields also determines the order of auto-generated form fields
        fields = ['avatar', 'signature', 'gender', 'age',
                  'hometown', 'hobby', 'bio']
        widgets = {
            'avatar': forms.FileInput(
                attrs={
                    # attributes for Bootstrap form control and label
                    'class': 'form-control-file'
                }
            ),
            'signature': forms.TextInput(
                attrs={
                    'placeholder': 'A short one-sentence signature',
                    'class': 'form-control',
                    'id': 'signature'
                }
            ),
            'age': forms.NumberInput(
                attrs={
                    'placeholder': 'Your age',
                    'class': 'form-control',
                    'id': 'age'
                }
            ),
            'gender': forms.Select(
                attrs={
                    'class': 'form-control',
                    'id': 'gender'
                }
            ),
            'hometown': forms.TextInput(
                attrs={
                    'placeholder': 'Your hometown',
                    'class': 'form-control',
                    'id': 'hometown'
                }
            ),
            'hobby': forms.TextInput(
                attrs={
                    'placeholder': 'Your hobby',
                    'class': 'form-control',
                    'id': 'hobby'
                }
            ),
            'bio': forms.Textarea(
                attrs={
                    'placeholder': 'A short biography of who you are. Introduce yourself to the world!',
                    'class': 'form-control',
                    'id': 'bio',
                    'cols': 20,
                    'rows': 10
                }
            )
        }
| 40.179625
| 112
| 0.475746
|
acfd92c46fd4fffe7a4f8fed504480889df0b235
| 1,843
|
py
|
Python
|
capirca/utils/config.py
|
google-admin/capirca
|
8c9e66456fedb3c0fc1c641dbefc41793e5c68d5
|
[
"Apache-2.0"
] | 604
|
2015-08-08T22:44:25.000Z
|
2022-03-30T11:51:23.000Z
|
capirca/utils/config.py
|
google-admin/capirca
|
8c9e66456fedb3c0fc1c641dbefc41793e5c68d5
|
[
"Apache-2.0"
] | 213
|
2015-08-04T20:11:22.000Z
|
2022-03-30T18:08:15.000Z
|
capirca/utils/config.py
|
google-admin/capirca
|
8c9e66456fedb3c0fc1c641dbefc41793e5c68d5
|
[
"Apache-2.0"
] | 207
|
2015-08-07T10:55:00.000Z
|
2022-03-02T17:07:34.000Z
|
"""A module to handle merging file configurations with CLI configs for Capirca."""
import yaml
# Built-in default values for every supported Capirca option; CLI flags and
# config files layered on top of this dict in generate_configs().
defaults = {
    'base_directory': './policies',
    'definitions_directory': './def',
    'policy_file': None,
    'output_directory': './',
    'optimize': False,
    'recursive': True,
    'debug': False,
    'verbose': False,
    'ignore_directories': ['DEPRECATED', 'def'],
    'max_renderers': 10,
    'shade_check': False,
    'exp_info': 2
}
def yaml_loader(filename):
    """Load and return the YAML document stored in *filename*.

    The original wrapped yaml.safe_load in a try/except AttributeError whose
    handler called the exact same function — dead code, removed here.
    """
    with open(filename, 'r') as f:
        return yaml.safe_load(f)
def flags_to_dict(absl_flags):
    """Collect the known Capirca options from *absl_flags* into a dict.

    Options whose flag value is None (i.e. not supplied on the command
    line) are omitted so they do not override file/default settings.
    """
    flag_names = (
        'base_directory',
        'definitions_directory',
        'policy_file',
        'output_directory',
        'optimize',
        'recursive',
        'debug',
        'verbose',
        'ignore_directories',
        'max_renderers',
        'shade_check',
        'exp_info',
    )
    collected = {name: getattr(absl_flags, name) for name in flag_names}
    return {name: value for name, value in collected.items()
            if value is not None}
def merge_files(*files):
    """Merge any number of YAML config files into one dict.

    Later files override earlier ones; keys whose final value is None are
    dropped from the result.
    """
    merged = {}
    for path in files:
        merged.update(yaml_loader(path))
    return {key: value for key, value in merged.items() if value is not None}
def generate_configs(absl_flags):
    """Build the effective configuration dict.

    Precedence (lowest to highest): built-in defaults, then CLI flags,
    then values from any --config_file file(s).
    """
    cli_configs = flags_to_dict(absl_flags)
    file_configs = {}
    if absl_flags.config_file:
        file_configs = merge_files(*absl_flags.config_file)
    merged = dict(defaults)
    merged.update(cli_configs)
    merged.update(file_configs)
    return merged
| 23.935065
| 82
| 0.672816
|
acfd936c7df1fe7694735969415fde1fe1aaa882
| 1,102
|
py
|
Python
|
leetcode/ds_string_longest_substring_without_repeat_chs.py
|
ngovindaraj/Python
|
edbcd302533bef81aa0c01e902e6081df58f383c
|
[
"MIT"
] | null | null | null |
leetcode/ds_string_longest_substring_without_repeat_chs.py
|
ngovindaraj/Python
|
edbcd302533bef81aa0c01e902e6081df58f383c
|
[
"MIT"
] | null | null | null |
leetcode/ds_string_longest_substring_without_repeat_chs.py
|
ngovindaraj/Python
|
edbcd302533bef81aa0c01e902e6081df58f383c
|
[
"MIT"
] | null | null | null |
# @file Longest Substring Without Repeating Characters
# @brief Given a string, find the length of the longest substring without repeating characters.
# https://leetcode.com/problems/longest-substring-without-repeating-characters/
'''
Given a string, find the length of the longest substring without repeating characters.
Example 1:
Input: "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Example 2:
Input: "bbbbb"
Output: 1
Explanation: The answer is "b", with the length of 1.
Example 3:
Input: "pwwkew"
Output: 3
Explanation: The answer is "wke", with the length of 3.
Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
'''
# Approach: two pointer
# time complexity : O(n)
# Space complexity : O(n) where every element in the list is unique
def lengthOfLongestSubstring(self, s: str) -> int:
    """Return the length of the longest substring of *s* with no repeats.

    Sliding-window scan: ``left`` marks the window start and ``seen`` holds
    exactly the characters currently inside the window.  Each character is
    added and removed at most once, so the scan is O(n) time, O(k) space
    (k = alphabet size).
    """
    seen = set()
    left = 0
    best = 0
    for right, ch in enumerate(s):
        # Shrink from the left until ch is no longer duplicated in the window.
        while ch in seen:
            seen.discard(s[left])
            left += 1
        seen.add(ch)
        best = max(best, right - left + 1)
    return best
| 26.238095
| 99
| 0.678766
|
acfd940f129269a1a264dc07e4bcb77e0f86171a
| 6,360
|
py
|
Python
|
pybamm/models/full_battery_models/lithium_ion/spme.py
|
DrSOKane/PyBaMM
|
903b4a05ef5a4f91633e990d4aec12c53df723a2
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/models/full_battery_models/lithium_ion/spme.py
|
DrSOKane/PyBaMM
|
903b4a05ef5a4f91633e990d4aec12c53df723a2
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/models/full_battery_models/lithium_ion/spme.py
|
DrSOKane/PyBaMM
|
903b4a05ef5a4f91633e990d4aec12c53df723a2
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Single Particle Model with Electrolyte (SPMe)
#
import pybamm
from .base_lithium_ion_model import BaseModel
class SPMe(BaseModel):
    """Single Particle Model with Electrolyte (SPMe) of a lithium-ion battery, from
    [1]_.
    Parameters
    ----------
    options : dict, optional
        A dictionary of options to be passed to the model.
    name : str, optional
        The name of the model.
    build : bool, optional
        Whether to build the model on instantiation. Default is True. Setting this
        option to False allows users to change any number of the submodels before
        building the complete model (submodels cannot be changed after the model is
        built).
    References
    ----------
    .. [1] SG Marquis, V Sulzer, R Timms, CP Please and SJ Chapman. “An asymptotic
           derivation of a single particle model with electrolyte”. Journal of The
           Electrochemical Society, 166(15):A3693–A3706, 2019
    **Extends:** :class:`pybamm.lithium_ion.BaseModel`
    """
    def __init__(
        self, options=None, name="Single Particle Model with electrolyte", build=True
    ):
        super().__init__(options, name)
        # Register every physics submodel; order mirrors the base-class
        # convention so dependencies are available when build_model runs.
        self.set_external_circuit_submodel()
        self.set_porosity_submodel()
        self.set_tortuosity_submodels()
        self.set_convection_submodel()
        self.set_interfacial_submodel()
        self.set_other_reaction_submodels_to_zero()
        self.set_particle_submodel()
        self.set_negative_electrode_submodel()
        self.set_electrolyte_submodel()
        self.set_positive_electrode_submodel()
        self.set_thermal_submodel()
        self.set_current_collector_submodel()
        self.set_crack_submodel()
        self.set_sei_submodel()
        if build:
            self.build_model()
        pybamm.citations.register("marquis2019asymptotic")
    def set_porosity_submodel(self):
        """Constant porosity unless SEI growth is allowed to change it."""
        if self.options["sei porosity change"] is False:
            self.submodels["porosity"] = pybamm.porosity.Constant(self.param)
        elif self.options["sei porosity change"] is True:
            self.submodels["porosity"] = pybamm.porosity.LeadingOrder(self.param)
    def set_convection_submodel(self):
        """SPMe neglects electrolyte convection in both directions."""
        self.submodels[
            "through-cell convection"
        ] = pybamm.convection.through_cell.NoConvection(self.param)
        self.submodels[
            "transverse convection"
        ] = pybamm.convection.transverse.NoConvection(self.param)
    def set_tortuosity_submodels(self):
        """Bruggeman tortuosity for both electrolyte and electrode phases."""
        self.submodels["electrolyte tortuosity"] = pybamm.tortuosity.Bruggeman(
            self.param, "Electrolyte", True
        )
        self.submodels["electrode tortuosity"] = pybamm.tortuosity.Bruggeman(
            self.param, "Electrode", True
        )
    def set_interfacial_submodel(self):
        """Inverse Butler-Volmer kinetics (potential from given current)."""
        self.submodels["negative interface"] = pybamm.interface.InverseButlerVolmer(
            self.param, "Negative", "lithium-ion main", self.options
        )
        self.submodels["positive interface"] = pybamm.interface.InverseButlerVolmer(
            self.param, "Positive", "lithium-ion main", self.options
        )
        self.submodels[
            "negative interface current"
        ] = pybamm.interface.CurrentForInverseButlerVolmer(
            self.param, "Negative", "lithium-ion main"
        )
        self.submodels[
            "positive interface current"
        ] = pybamm.interface.CurrentForInverseButlerVolmer(
            self.param, "Positive", "lithium-ion main"
        )
    def set_particle_submodel(self):
        """Single representative particle per electrode; Fickian diffusion
        or a polynomial concentration profile, per the 'particle' option."""
        if self.options["particle"] == "Fickian diffusion":
            self.submodels["negative particle"] = pybamm.particle.FickianSingleParticle(
                self.param, "Negative"
            )
            self.submodels["positive particle"] = pybamm.particle.FickianSingleParticle(
                self.param, "Positive"
            )
        elif self.options["particle"] in [
            "uniform profile",
            "quadratic profile",
            "quartic profile",
        ]:
            self.submodels[
                "negative particle"
            ] = pybamm.particle.PolynomialSingleParticle(
                self.param, "Negative", self.options["particle"]
            )
            self.submodels[
                "positive particle"
            ] = pybamm.particle.PolynomialSingleParticle(
                self.param, "Positive", self.options["particle"]
            )
    def set_negative_electrode_submodel(self):
        """Composite Ohm's-law model for the negative electrode."""
        self.submodels["negative electrode"] = pybamm.electrode.ohm.Composite(
            self.param, "Negative"
        )
    def set_positive_electrode_submodel(self):
        """Composite Ohm's-law model for the positive electrode."""
        self.submodels["positive electrode"] = pybamm.electrode.ohm.Composite(
            self.param, "Positive"
        )
    def set_electrolyte_submodel(self):
        """Select electrolyte conductivity/diffusion submodels.

        Raises
        ------
        pybamm.OptionError
            If the 'electrolyte conductivity' option is unsupported by SPMe.
        NotImplementedError
            If a surface formulation is requested (not implemented for SPMe).
        """
        if self.options["electrolyte conductivity"] not in [
            "default",
            "composite",
            "integrated",
        ]:
            raise pybamm.OptionError(
                "electrolyte conductivity '{}' not suitable for SPMe".format(
                    self.options["electrolyte conductivity"]
                )
            )
        if self.options["surface form"] is False:
            if self.options["electrolyte conductivity"] in ["default", "composite"]:
                self.submodels[
                    "electrolyte conductivity"
                ] = pybamm.electrolyte_conductivity.Composite(self.param)
            elif self.options["electrolyte conductivity"] == "integrated":
                self.submodels[
                    "electrolyte conductivity"
                ] = pybamm.electrolyte_conductivity.Integrated(self.param)
        elif self.options["surface form"] == "differential":
            raise NotImplementedError(
                "surface form '{}' has not been implemented for SPMe yet".format(
                    self.options["surface form"]
                )
            )
        elif self.options["surface form"] == "algebraic":
            raise NotImplementedError(
                "surface form '{}' has not been implemented for SPMe yet".format(
                    self.options["surface form"]
                )
            )
        self.submodels["electrolyte diffusion"] = pybamm.electrolyte_diffusion.Full(
            self.param
        )
| 36.136364
| 88
| 0.609748
|
acfd947c2676d02f09893e2055e48139bc8e635d
| 5,796
|
py
|
Python
|
tests/hazmat/primitives/test_block.py
|
derwolfe/cryptography
|
a6112133d6797313ea8fe741daf25178b2abe25c
|
[
"Apache-2.0"
] | null | null | null |
tests/hazmat/primitives/test_block.py
|
derwolfe/cryptography
|
a6112133d6797313ea8fe741daf25178b2abe25c
|
[
"Apache-2.0"
] | null | null | null |
tests/hazmat/primitives/test_block.py
|
derwolfe/cryptography
|
a6112133d6797313ea8fe741daf25178b2abe25c
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import binascii
import pytest
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, _Reasons
)
from cryptography.hazmat.primitives import interfaces
from cryptography.hazmat.primitives.ciphers import (
Cipher, algorithms, modes
)
from .utils import (
generate_aead_exception_test, generate_aead_tag_exception_test
)
from ...utils import raises_unsupported_algorithm
@utils.register_interface(interfaces.Mode)
class DummyMode(object):
    """Stand-in cipher mode used to exercise unsupported-algorithm paths."""
    name = "dummy-mode"
    def validate_for_algorithm(self, algorithm):
        # Accept any algorithm; validation behavior is irrelevant for the tests.
        pass
@utils.register_interface(interfaces.CipherAlgorithm)
class DummyCipher(object):
    """Stand-in cipher algorithm no backend supports; used to test errors."""
    name = "dummy-cipher"
@pytest.mark.cipher
class TestCipher(object):
    """Construction-level tests for the Cipher class."""
    def test_creates_encryptor(self, backend):
        # A valid algorithm/mode pair must yield a CipherContext encryptor.
        cipher = Cipher(
            algorithms.AES(binascii.unhexlify(b"0" * 32)),
            modes.CBC(binascii.unhexlify(b"0" * 32)),
            backend
        )
        assert isinstance(cipher.encryptor(), interfaces.CipherContext)
    def test_creates_decryptor(self, backend):
        # Symmetric check for the decrypt direction.
        cipher = Cipher(
            algorithms.AES(binascii.unhexlify(b"0" * 32)),
            modes.CBC(binascii.unhexlify(b"0" * 32)),
            backend
        )
        assert isinstance(cipher.decryptor(), interfaces.CipherContext)
    def test_instantiate_with_non_algorithm(self, backend):
        # A non-CipherAlgorithm object must be rejected at construction time.
        algorithm = object()
        with pytest.raises(TypeError):
            Cipher(algorithm, mode=None, backend=backend)
@pytest.mark.cipher
class TestCipherContext(object):
    """Behavioral tests for encryptor/decryptor context objects."""
    def test_use_after_finalize(self, backend):
        # update()/finalize() after finalize() must raise AlreadyFinalized
        # for both the encryptor and the decryptor.
        cipher = Cipher(
            algorithms.AES(binascii.unhexlify(b"0" * 32)),
            modes.CBC(binascii.unhexlify(b"0" * 32)),
            backend
        )
        encryptor = cipher.encryptor()
        encryptor.update(b"a" * 16)
        encryptor.finalize()
        with pytest.raises(AlreadyFinalized):
            encryptor.update(b"b" * 16)
        with pytest.raises(AlreadyFinalized):
            encryptor.finalize()
        decryptor = cipher.decryptor()
        decryptor.update(b"a" * 16)
        decryptor.finalize()
        with pytest.raises(AlreadyFinalized):
            decryptor.update(b"b" * 16)
        with pytest.raises(AlreadyFinalized):
            decryptor.finalize()
    def test_unaligned_block_encryption(self, backend):
        # Data not aligned to the 16-byte AES block size must be buffered:
        # output only appears once whole blocks are available.
        cipher = Cipher(
            algorithms.AES(binascii.unhexlify(b"0" * 32)),
            modes.ECB(),
            backend
        )
        encryptor = cipher.encryptor()
        ct = encryptor.update(b"a" * 15)
        assert ct == b""
        ct += encryptor.update(b"a" * 65)
        assert len(ct) == 80
        ct += encryptor.finalize()
        decryptor = cipher.decryptor()
        pt = decryptor.update(ct[:3])
        assert pt == b""
        pt += decryptor.update(ct[3:])
        assert len(pt) == 80
        assert pt == b"a" * 80
        decryptor.finalize()
    @pytest.mark.parametrize("mode", [DummyMode(), None])
    def test_nonexistent_cipher(self, backend, mode):
        # Unsupported algorithm/mode combinations surface as
        # UNSUPPORTED_CIPHER only when a context is actually requested.
        cipher = Cipher(
            DummyCipher(), mode, backend
        )
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
            cipher.encryptor()
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
            cipher.decryptor()
    def test_incorrectly_padded(self, backend):
        # Finalizing with a partial block (CBC requires full blocks) raises.
        cipher = Cipher(
            algorithms.AES(b"\x00" * 16),
            modes.CBC(b"\x00" * 16),
            backend
        )
        encryptor = cipher.encryptor()
        encryptor.update(b"1")
        with pytest.raises(ValueError):
            encryptor.finalize()
        decryptor = cipher.decryptor()
        decryptor.update(b"1")
        with pytest.raises(ValueError):
            decryptor.finalize()
@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        # NOTE(review): str literals rather than bytes are passed here,
        # unlike the b"..." keys used elsewhere in this file — confirm the
        # backend accepts both.
        algorithms.AES("\x00" * 16), modes.GCM("\x00" * 12)
    ),
    skip_message="Does not support AES GCM",
)
@pytest.mark.cipher
class TestAEADCipherContext(object):
    """AEAD (AES-GCM) exception tests, generated by shared test factories."""
    test_aead_exceptions = generate_aead_exception_test(
        algorithms.AES,
        modes.GCM,
    )
    test_aead_tag_exceptions = generate_aead_tag_exception_test(
        algorithms.AES,
        modes.GCM,
    )
@pytest.mark.cipher
class TestModeValidation(object):
    """Each mode must reject an IV/nonce of the wrong length (b"abc")."""
    def test_cbc(self, backend):
        with pytest.raises(ValueError):
            Cipher(
                algorithms.AES(b"\x00" * 16),
                modes.CBC(b"abc"),
                backend,
            )
    def test_ofb(self, backend):
        with pytest.raises(ValueError):
            Cipher(
                algorithms.AES(b"\x00" * 16),
                modes.OFB(b"abc"),
                backend,
            )
    def test_cfb(self, backend):
        with pytest.raises(ValueError):
            Cipher(
                algorithms.AES(b"\x00" * 16),
                modes.CFB(b"abc"),
                backend,
            )
    def test_ctr(self, backend):
        with pytest.raises(ValueError):
            Cipher(
                algorithms.AES(b"\x00" * 16),
                modes.CTR(b"abc"),
                backend,
            )
| 29.876289
| 71
| 0.61715
|
acfd9532d436f12eda6f151e514a68cafe371f22
| 511
|
py
|
Python
|
PyMOTW/source/configparser/configparser_read_many.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1
|
2019-01-04T05:47:50.000Z
|
2019-01-04T05:47:50.000Z
|
PyMOTW/source/configparser/configparser_read_many.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1
|
2020-07-18T03:52:03.000Z
|
2020-07-18T04:18:01.000Z
|
PyMOTW/source/configparser/configparser_read_many.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 2
|
2021-03-06T04:28:32.000Z
|
2021-03-06T04:59:17.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Reading a configuration file.
"""
#end_pymotw_header
from configparser import ConfigParser
import glob
parser = ConfigParser()
candidates = ['does_not_exist.ini', 'also-does-not-exist.ini',
              'simple.ini', 'multisection.ini']
# ConfigParser.read() accepts a list of filenames, silently skips any that
# cannot be opened, and returns the names of the files actually parsed.
found = parser.read(candidates)
missing = set(candidates) - set(found)
print('Found config files:', sorted(found))
print('Missing files :', sorted(missing))
| 21.291667
| 62
| 0.704501
|
acfd957b01ce1889601fa7e28d9e177764c1ae9c
| 639
|
py
|
Python
|
api_site/src/api_x/zyt/evas/weixin_pay/entry/commons.py
|
webee/pay
|
b48c6892686bf3f9014bb67ed119506e41050d45
|
[
"W3C"
] | 1
|
2019-10-14T11:51:49.000Z
|
2019-10-14T11:51:49.000Z
|
api_site/src/api_x/zyt/evas/weixin_pay/entry/commons.py
|
webee/pay
|
b48c6892686bf3f9014bb67ed119506e41050d45
|
[
"W3C"
] | null | null | null |
api_site/src/api_x/zyt/evas/weixin_pay/entry/commons.py
|
webee/pay
|
b48c6892686bf3f9014bb67ed119506e41050d45
|
[
"W3C"
] | null | null | null |
# coding=utf-8
from __future__ import unicode_literals
from functools import wraps
from flask import request
from . import notify_response
from ..api_access import parse_and_verify_request_data
from ...error import *
def parse_and_verify(f):
    """Decorator for WeChat-pay notify views: verify the request body
    before calling the view.

    On verification failure (ApiError) a 'wrong' notify response is
    returned immediately; otherwise the parsed data is attached to the
    request as ``request.verified_data`` for the wrapped view to use.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            # 'app' comes from the URL route (view args), identifying which
            # application's credentials to verify against.
            params = request.view_args
            app = params['app']
            verified_data = parse_and_verify_request_data(request.data, app)
        except ApiError as _:
            return notify_response.wrong()
        # Stash the verified payload on the request object for the view.
        request.__dict__['verified_data'] = verified_data
        return f(*args, **kwargs)
    return wrapper
| 26.625
| 76
| 0.679186
|
acfd9583f3d4cfea7ad5f86b1ec241b4f17eaccf
| 2,066
|
py
|
Python
|
openpyxl/chart/tests/test_marker.py
|
sekcheong/openpyxl
|
e1ba037f171efa348f75431c35a50de5ca277b78
|
[
"MIT"
] | null | null | null |
openpyxl/chart/tests/test_marker.py
|
sekcheong/openpyxl
|
e1ba037f171efa348f75431c35a50de5ca277b78
|
[
"MIT"
] | null | null | null |
openpyxl/chart/tests/test_marker.py
|
sekcheong/openpyxl
|
e1ba037f171efa348f75431c35a50de5ca277b78
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def Marker():
    """Fixture returning the Marker class (deferred import)."""
    from ..marker import Marker
    return Marker
class TestMarker:
    """Round-trip serialization tests for the chart Marker element."""
    def test_ctor(self, Marker):
        # Serializing a constructed Marker must produce the expected XML,
        # including the default line properties (solid prstDash).
        marker = Marker(symbol=None, size=5)
        xml = tostring(marker.to_tree())
        expected = """
        <marker>
          <symbol val="none"/>
          <size val="5"/>
          <spPr xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
            <a:ln>
              <a:prstDash val="solid" />
            </a:ln>
          </spPr>
        </marker>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff
    def test_from_xml(self, Marker):
        # Parsing XML must reconstruct an equal Marker instance.
        src = """
        <marker>
          <symbol val="square"/>
          <size val="5"/>
        </marker>
        """
        node = fromstring(src)
        marker = Marker.from_tree(node)
        assert marker == Marker(symbol="square", size=5)
@pytest.fixture
def DataPoint():
    """Fixture returning the DataPoint class (deferred import)."""
    from ..marker import DataPoint
    return DataPoint
class TestDataPoint:
    """Round-trip serialization tests for the chart DataPoint (dPt) element."""
    def test_ctor(self, DataPoint):
        # Serializing must emit the index plus default shape properties.
        dp = DataPoint(idx=9)
        xml = tostring(dp.to_tree())
        expected = """
        <dPt>
          <idx val="9"/>
          <spPr>
            <a:ln xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
              <a:prstDash val="solid"/>
            </a:ln>
          </spPr>
        </dPt>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff
    def test_from_xml(self, DataPoint):
        # Parsing must recover scalar attributes with proper types
        # (int for idx, bool for bubble3D).
        src = """
        <dPt>
          <idx val="9"/>
          <marker>
            <symbol val="triangle"/>
            <size val="5"/>
          </marker>
          <bubble3D val="0"/>
        </dPt>
        """
        node = fromstring(src)
        dp = DataPoint.from_tree(node)
        assert dp.idx == 9
        assert dp.bubble3D is False
| 23.747126
| 84
| 0.521297
|
acfd95aa9698e7a67c0c93b65ff798489551f2d7
| 1,102
|
py
|
Python
|
find.py
|
wesselb/catalogue
|
88790504dcee8505f60efe559fd75fa81a948b60
|
[
"MIT"
] | null | null | null |
find.py
|
wesselb/catalogue
|
88790504dcee8505f60efe559fd75fa81a948b60
|
[
"MIT"
] | null | null | null |
find.py
|
wesselb/catalogue
|
88790504dcee8505f60efe559fd75fa81a948b60
|
[
"MIT"
] | 1
|
2018-10-25T17:41:01.000Z
|
2018-10-25T17:41:01.000Z
|
import argparse
import catalogue.alfred
import catalogue.bin
import catalogue.utils
from config import config
def main(args):
    """Search the catalogue and print an Alfred-compatible JSON listing.

    args.content selects a Spotlight content search (mdfind) restricted to
    the document extensions; otherwise a fuzzy filename search (fzf) is
    used.  args.json switches the extension filter to .json files.
    """
    query = " ".join(args.query)
    if args.json:
        extensions = [".json"]
    else:
        extensions = [".pdf", ".djvu", ".epub"]
    if args.content:
        # Content search: let mdfind scan the resource path, then keep only
        # files with the selected extensions.
        files = catalogue.bin.mdfind(config["resource_path"], query)
        files = catalogue.utils.file_filter(files, extensions)
    else:
        # Name search: feed the candidate file list to fzf for fuzzy matching.
        files = catalogue.bin.fzf(
            "\n".join(catalogue.utils.list_files(extensions)), query
        )
    print(catalogue.alfred.list_json(files, config["base_path"]))
if __name__ == "__main__":
    # CLI entry point: build the argument parser and dispatch to main().
    desc = "Search through names of pdf resources."
    parser = argparse.ArgumentParser(prog="find.py", description=desc)
    parser.add_argument(
        "--content", help="instead search content", action="store_true", default=False
    )
    parser.add_argument(
        "--json", help="instead search json files", action="store_true", default=False
    )
    parser.add_argument("query", nargs="+", help="query to search for")
    main(parser.parse_args())
| 30.611111
| 86
| 0.65608
|
acfd96165ec9ddcf4f3f3500c8c461da5f03c6a7
| 828
|
py
|
Python
|
BlogProject/blog/models.py
|
ninninninja/Blog_DjangoDRF
|
aa88c1dfe5a3bc0f79f366b3a10172c44df5ddd3
|
[
"MIT"
] | null | null | null |
BlogProject/blog/models.py
|
ninninninja/Blog_DjangoDRF
|
aa88c1dfe5a3bc0f79f366b3a10172c44df5ddd3
|
[
"MIT"
] | null | null | null |
BlogProject/blog/models.py
|
ninninninja/Blog_DjangoDRF
|
aa88c1dfe5a3bc0f79f366b3a10172c44df5ddd3
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Article(models.Model):
    """A blog post with a title, body text, tag, optional image, and
    created/modified timestamps; listed newest-first by default."""
    title = models.CharField(max_length = 100)
    content = models.TextField()
    tag = models.CharField(max_length = 50)
    # auto_now_add stamps creation time once; auto_now updates on every save.
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    image = models.ImageField(upload_to='media/', null=True)
    def __str__(self) :
        return self.title
    class Meta:
        db_table = "article"
        ordering = ['-created']
class Comment(models.Model):
    """A comment attached to an Article; deleting the article cascades to
    its comments.  Commenters are identified only by name/email (no FK to
    a user account)."""
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    name = models.CharField(max_length=50, default='anonymous user')
    email = models.EmailField()
    created = models.DateTimeField(auto_now_add=True)
    content = models.CharField(max_length=500)
| 33.12
| 68
| 0.68599
|
acfd9635453d707d7b9fc7a094e95edebbf6568d
| 26
|
py
|
Python
|
exercises/tournament/tournament.py
|
RJTK/python
|
f9678d629735f75354bbd543eb7f10220a498dae
|
[
"MIT"
] | 1
|
2021-05-15T19:59:04.000Z
|
2021-05-15T19:59:04.000Z
|
exercises/tournament/tournament.py
|
RJTK/python
|
f9678d629735f75354bbd543eb7f10220a498dae
|
[
"MIT"
] | null | null | null |
exercises/tournament/tournament.py
|
RJTK/python
|
f9678d629735f75354bbd543eb7f10220a498dae
|
[
"MIT"
] | 2
|
2018-03-03T08:32:12.000Z
|
2019-08-22T11:55:53.000Z
|
def tally(data):
    """Compute tournament standings from *data*.

    Exercism exercise stub — intentionally unimplemented; returns None.
    """
    pass
| 8.666667
| 16
| 0.615385
|
acfd9783fb68ee4838ee17adb0e20e29b67c7799
| 7,739
|
py
|
Python
|
training.py
|
peekarboo/LongShortTermMemory-Prediction
|
267d7940054232b69a1202d9652859d107876b62
|
[
"Apache-2.0"
] | null | null | null |
training.py
|
peekarboo/LongShortTermMemory-Prediction
|
267d7940054232b69a1202d9652859d107876b62
|
[
"Apache-2.0"
] | null | null | null |
training.py
|
peekarboo/LongShortTermMemory-Prediction
|
267d7940054232b69a1202d9652859d107876b62
|
[
"Apache-2.0"
] | null | null | null |
#Tutorial used from https://www.thepythoncode.com/article/stock-price-prediction-in-python-using-tensorflow-2-and-keras
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
import os
import time
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, Bidirectional
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from yahoo_fin import stock_info as si
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from collections import deque
import numpy as np
import pandas as pd
import random
from tensorflow.keras.layers import LSTM
# Fix all RNG seeds (NumPy, TensorFlow, stdlib random) for reproducibility.
np.random.seed(3)
tf.random.set_seed(314)
random.seed(314)
# Cryptocurrency symbols to train a model for (one model per coin).
coin_id=['BTC','ETH','ADA','XRP', 'USDT','BCH','LINK','LTC','BNB','EOS','XLM','TRX' ]
#coin_id=['TRX','XEM','XMR','MIOTA', 'VET','DASH','ETC','ZEC','OMG','BAT','DOGE','ZRX','WAVES','DGB','KNC','ICX','LRC','QTUM','REP','LSK','ANT','DCR','BTG','SC','NANO','BNT','BCS','SNT' ]
# parameters
# Window length: number of past days fed to the LSTM per sample.
N_STEPS = 80
# Lookup step, 1 is the next day
LOOKUP_STEP = 60
# test ratio size
TEST_SIZE = 0.2
# features to use
FEATURE_COLUMNS = ["adjclose", "volume", "open", "high", "low"]
# date now
date_now = time.strftime("%Y-%m-%d")
### model parameters
N_LAYERS = 3
# LSTM cell
CELL = LSTM
# 256 LSTM neurons
UNITS = 256
# 40% dropout
DROPOUT = 0.4
# whether to use bidirectional RNNs
BIDIRECTIONAL = False
### training parameters
LOSS = "huber_loss"
OPTIMIZER = "adam"
BATCH_SIZE = 64
EPOCHS = 500
# create these folders if they does not exist
if not os.path.isdir("results"):
    os.mkdir("results")
if not os.path.isdir("logs"):
    os.mkdir("logs")
if not os.path.isdir("data"):
    os.mkdir("data")
if not os.path.isdir("model"):
    os.mkdir("model")
for i in coin_id:
ticker = i+"-USD"
ticker_data_filename = os.path.join("data", f"{ticker}_{date_now}.csv")
# model name to save, making it as unique as possible based on parameters
model_name = f"{date_now}_{ticker}-{LOSS}-{OPTIMIZER}-{CELL.__name__}-seq-{N_STEPS}-step-{LOOKUP_STEP}-layers-{N_LAYERS}-units-{UNITS}"
if BIDIRECTIONAL:
model_name += "-b"
def load_data(ticker, n_steps=50, scale=True, shuffle=True, lookup_step=1,
test_size=0.2, feature_columns=['adjclose', 'volume', 'open', 'high', 'low']):
#Loads data from Yahoo Finance source, as well as scaling, shuffling, normalizing and splitting.
# see if ticker is already a loaded stock from yahoo finance, if it alreay loaded use it directly, else load it from the yahoo_fin library
if isinstance(ticker, str):
df = si.get_data(ticker)
elif isinstance(ticker, pd.DataFrame):
df = ticker
else:
raise TypeError("error, cannot load ticker")
result = {}
result['df'] = df.copy()
for i in feature_columns:
assert i in df.columns, f"'{i}' does not exist in the dataframe."
if scale:
column_scaler = {}
# scale the data (prices) from 0 to 1
for column in feature_columns:
data_scaler = preprocessing.MinMaxScaler()
df[column] = data_scaler.fit_transform(np.expand_dims(df[column].values, axis=1))
column_scaler[column] = data_scaler
# add the MinMaxScaler instances to the result returned
result["column_scaler"] = column_scaler
df['future'] = df['adjclose'].shift(-lookup_step)
# last `lookup_step` columns contains NaN in future column
# get them before droping NaNs
last_sequence = np.array(df[feature_columns].tail(lookup_step))
# drop NaNs
df.dropna(inplace=True)
sequence_data = []
sequences = deque(maxlen=n_steps)
for entry, target in zip(df[feature_columns].values, df['future'].values):
sequences.append(entry)
if len(sequences) == n_steps:
sequence_data.append([np.array(sequences), target])
# get the last sequence by appending the last `n_step` sequence with `lookup_step` sequence
# for instance, if n_steps=50 and lookup_step=10, last_sequence should be of 59 (that is 50+10-1) length
# this last_sequence will be used to predict in future dates that are not available in the dataset
last_sequence = list(sequences) + list(last_sequence)
# shift the last sequence by -1
last_sequence = np.array(pd.DataFrame(last_sequence).shift(-1).dropna())
# add to result
result['last_sequence'] = last_sequence
# construct the X's and y's
X, y = [], []
for seq, target in sequence_data:
X.append(seq)
y.append(target)
# convert to numpy arrays
X = np.array(X)
y = np.array(y)
# reshape X to fit the neural network
X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
# split the dataset
result["X_train"], result["X_test"], result["y_train"], result["y_test"] = train_test_split(X, y,
test_size=test_size,
shuffle=shuffle)
# return the result
return result
# load the data
def create_model(sequence_length, units=256, cell=LSTM, n_layers=2, dropout=0.3,
                 loss="mean_absolute_error", optimizer="rmsprop", bidirectional=False):
    """Build a stacked recurrent regression model.

    Stacks `n_layers` recurrent layers (optionally bidirectional), each
    followed by Dropout, and finishes with a single linear output unit.
    The first layer carries `input_shape=(None, sequence_length)`; only the
    final layer collapses the time dimension (return_sequences=False).
    """
    model = Sequential()
    last_idx = n_layers - 1
    for idx in range(n_layers):
        if idx == 0:
            # First layer holds the input shape.  When n_layers == 1 this
            # branch wins, so a single-layer model keeps
            # return_sequences=True (matching the original behaviour).
            if bidirectional:
                model.add(Bidirectional(cell(units, return_sequences=True),
                                        input_shape=(None, sequence_length)))
            else:
                model.add(cell(units, return_sequences=True,
                               input_shape=(None, sequence_length)))
        else:
            # Hidden layers keep the sequence; the last layer drops it.
            keep_sequence = idx < last_idx
            layer = cell(units, return_sequences=keep_sequence)
            model.add(Bidirectional(layer) if bidirectional else layer)
        # Regularize after every recurrent layer.
        model.add(Dropout(dropout))
    model.add(Dense(1, activation="linear"))
    model.compile(loss=loss, metrics=["mean_absolute_error"], optimizer=optimizer)
    return model
# Prepare the dataset (scaling, windowing, train/test split).
data = load_data(ticker, N_STEPS, lookup_step=LOOKUP_STEP, test_size=TEST_SIZE, feature_columns=FEATURE_COLUMNS)
# Persist the (scaled) dataframe so the run can be inspected/reproduced later.
data["df"].to_csv(ticker_data_filename)
# Build the RNN from the module-level hyperparameters.
model = create_model(N_STEPS, loss=LOSS, units=UNITS, cell=CELL, n_layers=N_LAYERS,
                    dropout=DROPOUT, optimizer=OPTIMIZER, bidirectional=BIDIRECTIONAL)
# some tensorflow callbacks
# Checkpoint keeps only the best weights (by validation loss) in results/.
checkpointer = ModelCheckpoint(os.path.join("results", model_name + ".h5"), save_weights_only=True,
                               save_best_only=True, verbose=1)
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
history = model.fit(data["X_train"], data["y_train"],
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(data["X_test"], data["y_test"]),
                    callbacks=[checkpointer, tensorboard],
                    verbose=1)
# Save the final (last-epoch) weights; same path the checkpointer writes to.
model.save(os.path.join("results", model_name) + ".h5")
| 37.75122
| 187
| 0.615583
|
acfd980f5f7a53ee0918684d518e567a9ef562d1
| 1,309
|
py
|
Python
|
Django/02-Object-Relational-Mapping/query_spanning_relationships/read_course_instructors.py
|
mustafa-sarshar/Python-DBMS
|
64afce1d3b4c1ef41de792c45b6d69095e234f10
|
[
"MIT"
] | null | null | null |
Django/02-Object-Relational-Mapping/query_spanning_relationships/read_course_instructors.py
|
mustafa-sarshar/Python-DBMS
|
64afce1d3b4c1ef41de792c45b6d69095e234f10
|
[
"MIT"
] | null | null | null |
Django/02-Object-Relational-Mapping/query_spanning_relationships/read_course_instructors.py
|
mustafa-sarshar/Python-DBMS
|
64afce1d3b4c1ef41de792c45b6d69095e234f10
|
[
"MIT"
] | null | null | null |
# Configure Django before anything that touches the ORM is imported.
import inspect
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "query_spanning_relationships.settings")
from django.db import connection
# Ensure settings are read
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
from related_objects.models import *
from datetime import date

# Forward access: Course declares the `instructors` relation, so courses can
# be filtered through it directly.
courses = Course.objects.filter(instructors__first_name='Yan')
print("1. Get courses taught by Instructor `Yan`, forward")
print(courses)
print("\n")

# Backward access: Django adds an implicit `course_set` manager on Instructor.
instructor_yan = Instructor.objects.get(first_name='Yan')
print("1. Get courses taught by Instructor `Yan`, backward")
print(instructor_yan.course_set.all())
print("\n")

# Span the relation the other way: which instructors teach a 'Cloud' course?
instructors = Instructor.objects.filter(course__name__contains='Cloud')
print("2. Get the instructors of Cloud app dev course")
print(instructors)
print("\n")

# Collect the distinct occupations of all learners in Yan's courses.
courses = Course.objects.filter(instructors__first_name='Yan')
occupation_list = {
    learner.occupation
    for course in courses
    for learner in course.learners.all()
}
print("3. Check the occupations of the courses taught by instructor Yan'")
print(occupation_list)
| 35.378378
| 91
| 0.7945
|
acfd9845acbdb8f6f2ea16ab41d9947413a07b99
| 3,422
|
py
|
Python
|
test/functional/wallet_keypool.py
|
blockchaintrainer/Gath3r
|
ae48e710f97a63a9f5bc0d57ad80fdeea38fcbd1
|
[
"MIT"
] | null | null | null |
test/functional/wallet_keypool.py
|
blockchaintrainer/Gath3r
|
ae48e710f97a63a9f5bc0d57ad80fdeea38fcbd1
|
[
"MIT"
] | null | null | null |
test/functional/wallet_keypool.py
|
blockchaintrainer/Gath3r
|
ae48e710f97a63a9f5bc0d57ad80fdeea38fcbd1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Gthpcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet keypool and interaction with wallet encryption/locking."""
import time
from test_framework.test_framework import GthpcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class KeyPoolTest(GthpcoinTestFramework):
    """Functional test: keypool behaviour and its interaction with wallet
    encryption/locking."""

    def set_test_params(self):
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        node = self.nodes[0]

        # Before encryption the address is derived from the current HD seed.
        addr_before_encrypting = node.getnewaddress()
        addr_before_encrypting_data = node.getaddressinfo(addr_before_encrypting)
        wallet_info_old = node.getwalletinfo()
        assert addr_before_encrypting_data['hdseedid'] == wallet_info_old['hdseedid']

        # Encrypting the wallet replaces the HD seed.
        node.encryptwallet('test')
        addr = node.getnewaddress()
        addr_data = node.getaddressinfo(addr)
        wallet_info = node.getwalletinfo()
        assert addr_before_encrypting_data['hdseedid'] != wallet_info['hdseedid']
        assert addr_data['hdseedid'] == wallet_info['hdseedid']
        assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", node.getnewaddress)

        # Refill: keypoolrefill(6) yields 6 external and 6 internal keys.
        node.walletpassphrase('test', 12000)
        node.keypoolrefill(6)
        node.walletlock()
        wi = node.getwalletinfo()
        assert_equal(wi['keypoolsize_hd_internal'], 6)
        assert_equal(wi['keypoolsize'], 6)

        # Drain the internal (change) keys, then the next request must fail.
        for _ in range(6):
            node.getrawchangeaddress()
        addr = set()
        assert_raises_rpc_error(-12, "Keypool ran out", node.getrawchangeaddress)

        # Drain the external keys; all six must be distinct.
        for _ in range(6):
            addr.add(node.getnewaddress())
        assert len(addr) == 6
        assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", node.getnewaddress)

        # Refill with a 1-second passphrase timeout and verify it expires.
        node.walletpassphrase('test', 1)
        node.keypoolrefill(3)
        time.sleep(1.1)
        assert_equal(node.getwalletinfo()["unlocked_until"], 0)

        # Drain the three refilled keys, then expect failure again.
        for _ in range(3):
            node.getnewaddress()
        assert_raises_rpc_error(-12, "Keypool ran out", node.getnewaddress)

        # A large refill is reflected in both keypool counters.
        node.walletpassphrase('test', 100)
        node.keypoolrefill(100)
        wi = node.getwalletinfo()
        assert_equal(wi['keypoolsize_hd_internal'], 100)
        assert_equal(wi['keypoolsize'], 100)
# Entry point: run the functional test when executed directly.
if __name__ == '__main__':
    KeyPoolTest().main()
| 39.333333
| 119
| 0.668907
|
acfd9877ad50848ffe7db5de89d14c94f8ba7913
| 7,862
|
py
|
Python
|
lib/layers.py
|
Alexab/3D_multiview_reg
|
c39a97391828ca0c4833c83367be92c1fb3c9950
|
[
"MIT"
] | 275
|
2020-01-15T02:33:34.000Z
|
2022-03-28T08:16:58.000Z
|
lib/layers.py
|
dj-boy/3D_multiview_reg
|
22468adadfccaffe92c6d88a4cce42b5b7edb77e
|
[
"MIT"
] | 20
|
2020-03-15T13:08:35.000Z
|
2022-03-17T07:59:52.000Z
|
lib/layers.py
|
dj-boy/3D_multiview_reg
|
22468adadfccaffe92c6d88a4cce42b5b7edb77e
|
[
"MIT"
] | 47
|
2020-01-19T08:58:52.000Z
|
2022-03-25T04:21:51.000Z
|
import torch
import torch.nn.functional as F
import numpy as np
import time
from sklearn.neighbors import NearestNeighbors
from lib.utils import extract_mutuals, pairwise_distance, knn_point
#from Pointnet2_PyTorch.pointnet2_ops_lib.pointnet2_ops import pointnet2_utils
class Soft_NN(torch.nn.Module):
    """ Nearest neighbor class. Constructs either a stochastic (differentiable) or hard nearest neighbors layer.

    Args:
        corr_type (string): type of the NN search, one of ['soft', 'soft_gumbel', 'hard']
        st (bool): if straight through gradient propagation should be used (biased) (https://arxiv.org/abs/1308.3432)
        temp (float): initial value of the temperature used in softmax and gumbel_softmax (squared internally, see get_temp)
        min_temp (float): lower bound on the effective temperature (prevents division by ~0)
        device (torch device): device to use
    """
    def __init__(self, corr_type='soft', st=True, temp = 0.3, min_temp = 1e-4, device = 'cuda'):
        super().__init__()
        assert corr_type in ['soft', 'hard', 'soft_gumbel'], 'Wrong correspondence type selected. Must be one of [soft, soft_gumbel, hard]'
        if corr_type == 'hard':
            print('Gradients cannot be backpropagated to the feature descriptor because hard NN search is selected.')
        self.device = device
        self.corr_type = corr_type
        self.st = st
        # Fixed (non-learnable) floor for the temperature.
        self.min_temp = torch.tensor([min_temp]).to(self.device)
        # Learnable temperature; squared in get_temp() so it can never go negative.
        self._temperature = torch.nn.Parameter(torch.tensor(
            temp,
            requires_grad=True,
            dtype=torch.float32,
        ).to(self.device)
        )
    def get_temp(self):
        # Effective temperature: squared parameter, clamped from below by min_temp.
        return torch.max(self._temperature**2, self.min_temp).to(self.device)
    def forward(self, x_f, y_f, y_c):
        """ Computes the correspondences in the feature space based on the selected parameters.

        Args:
            x_f (torch.tensor): infered features of points x [b,n,c]
            y_f (torch.tensor): infered features of points y [b,m,c]
            y_c (torch.tensor): coordinates of point y [b,m,3]
        Returns:
            x_corr (torch.tensor): coordinates of the feature based correspondences of points x [b,n,3]
        """
        # NOTE: distances are detached here, so no gradient flows back into the
        # feature tensors through this op — gradients reach only the learnable
        # temperature (via the softmax) and the coordinates y_c (via matmul).
        dist = pairwise_distance(x_f,y_f).detach()
        if self.corr_type == 'soft':
            y_soft = torch.softmax(-dist/(self.get_temp()), dim=2)
            if self.st:
                # Straight through: one-hot forward pass, soft gradients backward.
                index = y_soft.max(dim=2, keepdim=True)[1]
                y_hard = torch.zeros_like(y_soft).scatter_(dim=2, index=index, value=1.0)
                ret = y_hard - y_soft.detach() + y_soft
            else:
                ret = y_soft
        elif self.corr_type == 'soft_gumbel':
            if self.st:
                # Straight through.
                ret = F.gumbel_softmax(-dist, tau=self.get_temp(), hard=True)
            else:
                ret = F.gumbel_softmax(-dist, tau=self.get_temp(), hard=False)
        else:
            # Hard (non-differentiable) one-hot assignment to the nearest neighbor.
            index = dist.min(dim=2, keepdim=True)[1]
            ret = torch.zeros_like(dist).scatter_(dim=2, index=index, value=1.0)
        # Compute corresponding coordinates as a (soft or hard) weighted average.
        x_corr = torch.matmul(ret, y_c)
        return x_corr
class Sampler(torch.nn.Module):
    """ Sampler class. Constructs a layer used to sample the points either based on their metric distance (FPS) or by randomly selecting them.

    Args:
        samp_type (string): sampling strategy, one of ['fps', 'rand']
        targeted_num_points (int): desired number of points to sample per point cloud
    """
    def __init__(self, samp_type='fps', targeted_num_points=2000):
        super().__init__()
        assert samp_type in ['fps', 'rand'], 'Wrong sampling type selected. Must be one of [fps, rand]'
        self.samp_type = samp_type
        self.targeted_num_points = targeted_num_points
    def forward(self, input_C, input_F, pts_list):
        """ Samples the predifined points from the input point cloud and the corresponding feature descriptors.

        Args:
            input_C (torch.tensor): coordinates of the points [~b*n,3]
            input_F (torch.tensor): infered features [~b*n,c]
            pts_list (torch.tensor): number of points of each point cloud in the batch
                (a tensor, not a plain list — `.cpu()` and tensor slicing are used below)
        Returns:
            sampled_C (torch tensor): coordinates of the sampled points [b,m,3]
            sampled_F (torch tensor): features of the sampled points [b,m,c]
        """
        # Sample the data
        idx_temp = []  # NOTE(review): never used below; candidate for removal
        sampled_F = []
        sampled_C = []
        # Final number of points to be sampled is the min of the desired number of points and smallest number of point in the batch
        num_points = min(self.targeted_num_points, min(pts_list.cpu().numpy()))
        for i in range(len(pts_list)):
            # Index range of the i-th cloud inside the flat [~b*n, ...] tensors.
            pcd_range = torch.arange(torch.sum(pts_list[:i]), torch.sum(pts_list[:(i + 1)]), 1)
            if self.samp_type == 'fps':
                temp_pcd = torch.index_select(input_C, dim=0, index=pcd_range.to(input_C).long())
                # Perform farthest point sampling on the current point cloud
                # NOTE(review): the pointnet2_utils import is commented out at the
                # top of this file, so this branch currently raises NameError —
                # confirm the dependency before using samp_type='fps'.
                idxs = pointnet2_utils.furthest_point_sample(temp_pcd, num_points)
                # Move the indeces to the start of this point cloud
                idxs += pcd_range[0]
            elif self.samp_type == 'rand':
                # Randomly select the indices to keep
                # NOTE(review): this test uses the batch-wide minimum (num_points)
                # rather than this cloud's own size, and both branches draw
                # targeted_num_points samples (the fps branch draws num_points) —
                # verify this asymmetry is intended.
                if num_points >= self.targeted_num_points:
                    idxs = torch.from_numpy(np.random.choice(pcd_range, self.targeted_num_points, replace=False)).to(input_C)
                else:
                    idxs = torch.from_numpy(np.random.choice(pcd_range, self.targeted_num_points, replace=True)).to(input_C)
            sampled_F.append(torch.index_select(input_F, dim=0, index=idxs.long()))
            sampled_C.append(torch.index_select(input_C, dim=0, index=idxs.long()))
        # Stack the per-cloud samples into batched [b, m, ...] tensors.
        return torch.stack(sampled_C, dim=0), torch.stack(sampled_F, dim=0)
if __name__ == "__main__":
test = torch.rand((3,10,10))
test_1 = torch.rand((3,10,10))
test_2 = torch.rand((3,10,3))
soft_nn_1 = Soft_NN(corr_type='soft')
soft_nn_2 = Soft_NN(corr_type='soft_gumbel')
soft_nn_3 = Soft_NN(corr_type='hard')
# Iterrative
neigh = NearestNeighbors()
ret_iter = []
array_input = test_1[0,:,:]
for i in range(test.shape[0]):
neigh.fit(test_1[i,:,:].cpu().numpy())
idx = neigh.kneighbors(test[i,:,:].cpu().numpy(), n_neighbors=1, return_distance=False)
ret_iter.append(test_2[i,idx.reshape(-1,),:])
ret_iter = torch.stack(ret_iter)
ret_1 = soft_nn_1(test,test_1,test_2)
ret_2 = soft_nn_2(test,test_1,test_2)
ret_3 = soft_nn_3(test,test_1,test_2)
diff = ret_1 - ret_2
diff_2 = ret_2 - ret_3
diff_3 = ret_1 - ret_3
diff_4 = ret_1 - ret_iter
# Test the mutuals
pc_1 = torch.rand((5,2000,3)).cuda()
pc_2 = torch.rand((5,2000,3)).cuda()
pc_1_soft_c = torch.rand((5,2000,3)).cuda()
pc_2_soft_c = torch.rand((5,2000,3)).cuda()
test_mutuals = extract_mutuals(pc_1, pc_2, pc_1_soft_c, pc_2_soft_c)
# Test the sampler
test_C = torch.rand(3000,3).float()
test_F = torch.rand(3000,32).float()
pts_list = [300,700,1000,400,600]
# Test random sampling
sampler = Sampler(targeted_num_points=100,samp_type='rand')
sampled_C, sampled_F = sampler(test_C,test_F,pts_list)
# Test fps
sampler_fps = Sampler(targeted_num_points=100, samp_type='fps')
| 36.738318
| 143
| 0.605444
|
acfd988a77c938ef38dfccad5452836b87c457f9
| 9,731
|
py
|
Python
|
lcfcn/networks.py
|
AliKhoda/LCFCN
|
f1bf0752a92fde824d6fb70e88c443890cdb51f8
|
[
"Apache-2.0"
] | 170
|
2018-10-28T07:04:49.000Z
|
2022-03-08T12:14:12.000Z
|
lcfcn/networks.py
|
AliKhoda/LCFCN
|
f1bf0752a92fde824d6fb70e88c443890cdb51f8
|
[
"Apache-2.0"
] | 44
|
2019-01-16T11:05:39.000Z
|
2022-02-21T15:37:49.000Z
|
lcfcn/networks.py
|
AliKhoda/LCFCN
|
f1bf0752a92fde824d6fb70e88c443890cdb51f8
|
[
"Apache-2.0"
] | 46
|
2018-11-09T04:34:22.000Z
|
2022-02-22T14:06:21.000Z
|
import torch.nn as nn
import torch
import numpy as np
import torchvision
import torch.utils.model_zoo as model_zoo
class FCN8_VGG16(nn.Module):
    """FCN-8s semantic segmentation network on a VGG16 backbone.

    Loads ImageNet-pretrained VGG16 weights from the torchvision model zoo,
    recasts the fully connected layers (fc6/fc7) as convolutions, and fuses
    pool3/pool4/fc7 score maps via transposed convolutions initialized to
    bilinear upsampling.
    """
    def __init__(self, n_classes):
        super().__init__()
        self.n_classes = n_classes
        # PREDEFINE LAYERS
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.relu = nn.ReLU(inplace=True)
        # VGG16 PART
        # padding=100 on the first conv is the standard FCN trick: it leaves
        # room for the fixed crop offsets (5, 9, 31) used in forward().
        self.conv1_1 = conv3x3(3, 64, stride=1, padding=100)
        self.conv1_2 = conv3x3(64, 64)
        self.conv2_1 = conv3x3(64, 128)
        self.conv2_2 = conv3x3(128, 128)
        self.conv3_1 = conv3x3(128, 256)
        self.conv3_2 = conv3x3(256, 256)
        self.conv3_3 = conv3x3(256, 256)
        self.conv4_1 = conv3x3(256, 512)
        self.conv4_2 = conv3x3(512, 512)
        self.conv4_3 = conv3x3(512, 512)
        self.conv5_1 = conv3x3(512, 512)
        self.conv5_2 = conv3x3(512, 512)
        self.conv5_3 = conv3x3(512, 512)
        # fc6/fc7: the VGG fully connected layers expressed as convolutions.
        self.fc6 = nn.Conv2d(512, 4096, kernel_size=7, stride=1, padding=0)
        self.dropout = nn.Dropout()
        self.fc7 = nn.Conv2d(4096, 4096, kernel_size=1, stride=1, padding=0)
        # SEMANTIC SEGMENTAION PART
        self.scoring_layer = nn.Conv2d(4096, self.n_classes, kernel_size=1,
                                          stride=1, padding=0)
        self.upscore2 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
                                           kernel_size=4, stride=2, bias=False)
        self.upscore_pool4 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
                                           kernel_size=4, stride=2, bias=False)
        self.upscore8 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
                                           kernel_size=16, stride=8, bias=False)
        # Initilize Weights
        # Score layers start at zero so training begins from the backbone only.
        self.scoring_layer.weight.data.zero_()
        self.scoring_layer.bias.data.zero_()
        self.score_pool3 = nn.Conv2d(256, self.n_classes, kernel_size=1)
        self.score_pool4 = nn.Conv2d(512, self.n_classes, kernel_size=1)
        self.score_pool3.weight.data.zero_()
        self.score_pool3.bias.data.zero_()
        self.score_pool4.weight.data.zero_()
        self.score_pool4.bias.data.zero_()
        # Transposed convolutions start as bilinear upsampling kernels.
        self.upscore2.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 4))
        self.upscore_pool4.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 4))
        self.upscore8.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 16))
        # Pretrained layers
        # NOTE: downloads VGG16 weights on first use (network access required).
        pth_url = 'https://download.pytorch.org/models/vgg16-397923af.pth' # download from model zoo
        state_dict = model_zoo.load_url(pth_url)
        layer_names = [layer_name for layer_name in state_dict]
        # Copy weights parameter-by-parameter; this relies on the iteration
        # order of self.parameters() matching the downloaded state_dict order.
        counter = 0
        for p in self.parameters():
            if counter < 26: # conv1_1 to pool5
                p.data = state_dict[ layer_names[counter] ]
            elif counter == 26: # fc6 weight
                p.data = state_dict[ layer_names[counter] ].view(4096, 512, 7, 7)
            elif counter == 27: # fc6 bias
                p.data = state_dict[ layer_names[counter] ]
            elif counter == 28: # fc7 weight
                p.data = state_dict[ layer_names[counter] ].view(4096, 4096, 1, 1)
            elif counter == 29: # fc7 bias
                p.data = state_dict[ layer_names[counter] ]
            counter += 1

    def forward(self, x):
        """Return per-class logits with the same spatial size as the input x [n,c,h,w]."""
        n,c,h,w = x.size()
        # VGG16 PART
        conv1_1 = self.relu( self.conv1_1(x) )
        conv1_2 = self.relu( self.conv1_2(conv1_1) )
        pool1 = self.pool(conv1_2)
        conv2_1 = self.relu(   self.conv2_1(pool1) )
        conv2_2 = self.relu(   self.conv2_2(conv2_1) )
        pool2 = self.pool(conv2_2)
        conv3_1 = self.relu(   self.conv3_1(pool2) )
        conv3_2 = self.relu(   self.conv3_2(conv3_1) )
        conv3_3 = self.relu(   self.conv3_3(conv3_2) )
        pool3 = self.pool(conv3_3)
        conv4_1 = self.relu(   self.conv4_1(pool3) )
        conv4_2 = self.relu(   self.conv4_2(conv4_1) )
        conv4_3 = self.relu(   self.conv4_3(conv4_2) )
        pool4 = self.pool(conv4_3)
        conv5_1 = self.relu(   self.conv5_1(pool4) )
        conv5_2 = self.relu(   self.conv5_2(conv5_1) )
        conv5_3 = self.relu(   self.conv5_3(conv5_2) )
        pool5 = self.pool(conv5_3)
        fc6 = self.dropout( self.relu(   self.fc6(pool5) ) )
        fc7 = self.dropout( self.relu(   self.fc7(fc6) ) )
        # SEMANTIC SEGMENTATION PART
        # first: coarse prediction from fc7, upsampled x2
        scores = self.scoring_layer( fc7 )
        upscore2 = self.upscore2(scores)
        # second: fuse the pool4 skip connection (cropped to match) and upsample x2
        score_pool4 = self.score_pool4(pool4)
        score_pool4c = score_pool4[:, :, 5:5+upscore2.size(2),
                                         5:5+upscore2.size(3)]
        upscore_pool4 = self.upscore_pool4(score_pool4c + upscore2)
        # third: fuse the pool3 skip connection and upsample x8 to input scale
        score_pool3 = self.score_pool3(pool3)
        score_pool3c = score_pool3[:, :, 9:9+upscore_pool4.size(2),
                                         9:9+upscore_pool4.size(3)]
        output = self.upscore8(score_pool3c + upscore_pool4)
        # crop away the border introduced by padding=100 in conv1_1
        return output[:, :, 31: (31 + h), 31: (31 + w)].contiguous()
class FCN8_ResNet(nn.Module):
    """FCN-8s-style segmentation network on a pretrained ResNet-50 backbone.

    Scores feature maps at strides 8/16/32 and fuses them coarse-to-fine via
    bilinear interpolation back to the input resolution. BatchNorm parameters
    are frozen.
    """
    def __init__(self, n_classes):
        super().__init__()
        self.n_classes = n_classes
        # Load the pretrained weights, remove avg pool
        # layer and get the output stride of 8
        resnet50_32s = torchvision.models.resnet50(pretrained=True)
        resnet_block_expansion_rate = resnet50_32s.layer1[0].expansion
        # Create a linear layer -- we don't need logits in this case
        # (replacing fc with an empty Sequential makes the backbone feature-only)
        resnet50_32s.fc = nn.Sequential()
        self.resnet50_32s = resnet50_32s
        # Per-stride 1x1 scoring heads (channel counts scaled by the
        # bottleneck expansion factor, 4 for ResNet-50).
        self.score_32s = nn.Conv2d(512 *  resnet_block_expansion_rate,
                                   self.n_classes,
                                   kernel_size=1)
        self.score_16s = nn.Conv2d(256 *  resnet_block_expansion_rate,
                                   self.n_classes,
                                   kernel_size=1)
        self.score_8s = nn.Conv2d(128 *  resnet_block_expansion_rate,
                                  self.n_classes,
                                  kernel_size=1)
        # # FREEZE BATCH NORMS
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.weight.requires_grad = False
                m.bias.requires_grad = False

    def extract_features(self, x_input):
        """Return the stride-8, stride-16 and stride-32 backbone feature maps."""
        # eval() keeps the (frozen) BatchNorm layers in inference mode.
        self.resnet50_32s.eval()
        x = self.resnet50_32s.conv1(x_input)
        x = self.resnet50_32s.bn1(x)
        x = self.resnet50_32s.relu(x)
        x = self.resnet50_32s.maxpool(x)
        x = self.resnet50_32s.layer1(x)
        x_8s = self.resnet50_32s.layer2(x)
        x_16s = self.resnet50_32s.layer3(x_8s)
        x_32s = self.resnet50_32s.layer4(x_16s)
        return x_8s, x_16s, x_32s

    def forward(self, x):
        """Return per-class logits at the input's spatial resolution."""
        self.resnet50_32s.eval()
        input_spatial_dim = x.size()[2:]
        x = self.resnet50_32s.conv1(x)
        x = self.resnet50_32s.bn1(x)
        x = self.resnet50_32s.relu(x)
        x = self.resnet50_32s.maxpool(x)
        x = self.resnet50_32s.layer1(x)
        x = self.resnet50_32s.layer2(x)
        logits_8s = self.score_8s(x)
        x = self.resnet50_32s.layer3(x)
        logits_16s = self.score_16s(x)
        x = self.resnet50_32s.layer4(x)
        logits_32s = self.score_32s(x)
        logits_16s_spatial_dim = logits_16s.size()[2:]
        logits_8s_spatial_dim = logits_8s.size()[2:]
        # Coarse-to-fine fusion: 32s -> 16s -> 8s -> input resolution.
        logits_16s += nn.functional.interpolate(logits_32s,
                                        size=logits_16s_spatial_dim,
                                        mode="bilinear",
                                        align_corners=True)
        logits_8s += nn.functional.interpolate(logits_16s,
                                       size=logits_8s_spatial_dim,
                                       mode="bilinear",
                                       align_corners=True)
        logits_upsampled = nn.functional.interpolate(logits_8s,
                                             size=input_spatial_dim,
                                             mode="bilinear",
                                             align_corners=True)
        return logits_upsampled
# Utils
def get_upsampling_weight(in_channels, out_channels, kernel_size):
    """Build a 2D bilinear-interpolation kernel for ConvTranspose2d upsampling.

    Returns a float tensor of shape (in_channels, out_channels, k, k) whose
    matching (i, i) channel pairs hold the separable bilinear filter; all
    other channel pairs stay zero.
    """
    factor = (kernel_size + 1) // 2
    # Kernel center: half-sample offset for even sizes.
    center = factor - 1 if kernel_size % 2 == 1 else factor - 0.5
    coords = np.arange(kernel_size)
    taps = 1 - np.abs(coords - center) / factor   # 1-D triangular profile
    filt = np.outer(taps, taps)                   # separable 2-D bilinear filter
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
                      dtype=np.float64)
    # Place the filter only on matching (in, out) channel pairs.
    weight[np.arange(in_channels), np.arange(out_channels), :, :] = filt
    return torch.from_numpy(weight).float()
def conv3x3(in_planes, out_planes, stride=1, padding=1):
    """3x3 convolution with padding (nn.Conv2d normalizes ints to pairs)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=padding)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (pointwise), no padding."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     padding=0, stride=stride)
| 38.011719
| 102
| 0.559757
|
acfd99696c400e968e09a9372e373c8bb6f82047
| 5,345
|
py
|
Python
|
dev/tools/leveleditor/direct/showbase/ObjectPool.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
dev/tools/leveleditor/direct/showbase/ObjectPool.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
dev/tools/leveleditor/direct/showbase/ObjectPool.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
"""Undocumented Module"""
__all__ = ['Diff', 'ObjectPool']
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.PythonUtil import invertDictLossless, makeList, safeRepr
from direct.showbase.PythonUtil import getNumberedTypedString, getNumberedTypedSortedString
from direct.showbase.PythonUtil import getNumberedTypedSortedStringWithReferrersGen
import types
import gc
class Diff:
    """Result of ObjectPool.diff(): the objects lost and gained between two pools."""
    def __init__(self, lost, gained):
        # lost/gained: ObjectPool instances holding the removed/added objects
        self.lost=lost
        self.gained=gained
    def printOut(self, full=False):
        # Print a summary of both pools; with full=True also dump every gained
        # object and its referrers (useful when hunting leaks).
        print 'lost %s objects, gained %s objects' % (len(self.lost), len(self.gained))
        print '\n\nself.lost\n'
        print self.lost.typeFreqStr()
        print '\n\nself.gained\n'
        print self.gained.typeFreqStr()
        if full:
            self.gained.printObjsByType()
            print '\n\nGAINED-OBJECT REFERRERS\n'
            self.gained.printReferrers(1)
class ObjectPool:
"""manipulate a pool of Python objects"""
notify = directNotify.newCategory('ObjectPool')
def __init__(self, objects):
self._objs = list(objects)
self._type2objs = {}
self._count2types = {}
self._len2obj = {}
type2count = {}
for obj in self._objs:
typ = itype(obj)
type2count.setdefault(typ, 0)
type2count[typ] += 1
self._type2objs.setdefault(typ, [])
self._type2objs[typ].append(obj)
try:
self._len2obj[len(obj)] = obj
except:
pass
self._count2types = invertDictLossless(type2count)
def _getInternalObjs(self):
return (self._objs, self._type2objs, self._count2types)
def destroy(self):
del self._objs
del self._type2objs
del self._count2types
def getTypes(self):
return self._type2objs.keys()
def getObjsOfType(self, type):
return self._type2objs.get(type, [])
def printObjsOfType(self, type):
for obj in self._type2objs.get(type, []):
print repr(obj)
def diff(self, other):
"""print difference between this pool and 'other' pool"""
thisId2obj = {}
otherId2obj = {}
for obj in self._objs:
thisId2obj[id(obj)] = obj
for obj in other._objs:
otherId2obj[id(obj)] = obj
thisIds = set(thisId2obj.keys())
otherIds = set(otherId2obj.keys())
lostIds = thisIds.difference(otherIds)
gainedIds = otherIds.difference(thisIds)
del thisIds
del otherIds
lostObjs = []
for i in lostIds:
lostObjs.append(thisId2obj[i])
gainedObjs = []
for i in gainedIds:
gainedObjs.append(otherId2obj[i])
return Diff(self.__class__(lostObjs), self.__class__(gainedObjs))
def typeFreqStr(self):
s = 'Object Pool: Type Frequencies'
s += '\n============================='
counts = list(set(self._count2types.keys()))
counts.sort()
counts.reverse()
for count in counts:
types = makeList(self._count2types[count])
for typ in types:
s += '\n%s\t%s' % (count, typ)
return s
def printObjsByType(self, printReferrers=False):
print 'Object Pool: Objects By Type'
print '\n============================'
counts = list(set(self._count2types.keys()))
counts.sort()
# print types with the smallest number of instances first, in case
# there's a large group that waits a long time before printing
#counts.reverse()
for count in counts:
types = makeList(self._count2types[count])
for typ in types:
print 'TYPE: %s, %s objects' % (repr(typ), len(self._type2objs[typ]))
if printReferrers:
for line in getNumberedTypedSortedStringWithReferrersGen(self._type2objs[typ]):
print line
else:
print getNumberedTypedSortedString(self._type2objs[typ])
def containerLenStr(self):
s = 'Object Pool: Container Lengths'
s += '\n=============================='
lengths = list(self._len2obj.keys())
lengths.sort()
lengths.reverse()
for count in counts:
pass
def printReferrers(self, numEach=3):
"""referrers of the first few of each type of object"""
counts = list(set(self._count2types.keys()))
counts.sort()
counts.reverse()
for count in counts:
types = makeList(self._count2types[count])
for typ in types:
print '\n\nTYPE: %s' % repr(typ)
for i in xrange(min(numEach,len(self._type2objs[typ]))):
obj = self._type2objs[typ][i]
print '\nOBJ: %s\n' % safeRepr(obj)
referrers = gc.get_referrers(obj)
print '%s REFERRERS:\n' % len(referrers)
if len(referrers):
print getNumberedTypedString(referrers, maxLen=80,
numPrefix='REF')
else:
print '<No Referrers>'
def __len__(self):
return len(self._objs)
| 35.633333
| 99
| 0.566137
|
acfd99cf849c3bfc3fc17053cff20c608ea43c39
| 3,209
|
py
|
Python
|
src/cudamon.py
|
djenriquez/cudamon
|
1f7d57245065cb0023feb8dbf9d86b775e656147
|
[
"MIT"
] | null | null | null |
src/cudamon.py
|
djenriquez/cudamon
|
1f7d57245065cb0023feb8dbf9d86b775e656147
|
[
"MIT"
] | null | null | null |
src/cudamon.py
|
djenriquez/cudamon
|
1f7d57245065cb0023feb8dbf9d86b775e656147
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import subprocess as sp
import xml.etree.ElementTree as ET
import re
import os
import logging
from src import sns
class CUDAMon:
    """Monitor NVIDIA GPUs via `nvidia-smi` and publish SNS alerts when a
    card is under-utilized or running too hot.

    Thresholds are read from environment variables keyed by card
    architecture: GPU_UTIL_<arch> (default 80%) and GPU_TEMP_<arch>
    (default 75C).
    """

    def __init__(self):
        self.gpus = []          # per-GPU stat dicts, refreshed by _get_nvidia_smi()
        self.sns = sns.SNS()    # alert publisher
        self._get_nvidia_smi()
        logging.info('Cards detected: ')
        for gpu in self.gpus:
            logging.info(gpu['card'])

    def check_gpus(self):
        """Refresh stats; alert if any card is idle or hot, else reset the alert."""
        logging.debug('Checking GPUs')
        self._get_nvidia_smi()
        running = self._is_card_running()
        cool = self._is_card_temp_ok()
        if running and cool:
            self.sns.reset_alert()
        else:
            self.sns.alert()

    def _get_nvidia_smi(self):
        """Populate self.gpus by parsing the XML output of `nvidia-smi -q -x`."""
        data = sp.check_output(['nvidia-smi', '-q', '-x'])
        root = ET.fromstring(data)
        self.gpus = []
        # FIX: raw strings — '\d' in a plain literal is an invalid escape
        # sequence (SyntaxWarning / future error on modern Python).
        decimal = re.compile(r'\d+\.*\d*')
        card_r = re.compile(r'\d+\s*\S*')
        for gpu in root.iter('gpu'):
            pcie_id = gpu.attrib['id']                      # parsed but currently unused
            product_brand = gpu.find('product_brand').text  # parsed but currently unused
            card = gpu.find('product_name').text
            pcie_bus = gpu.find('pci').find('pci_bus').text
            fan_speed = decimal.findall(gpu.find('fan_speed').text)[0]
            gpu_util = decimal.findall(gpu.find('utilization').find('gpu_util').text)[0]
            memory_util = decimal.findall(gpu.find('utilization').find('memory_util').text)[0]
            temp = decimal.findall(gpu.find('temperature').find('gpu_temp').text)[0]
            power = decimal.findall(gpu.find('power_readings').find('power_draw').text)[0]
            # Derive an env-var-friendly architecture tag from the product
            # name, normalizing 'Ti' variants to e.g. '1080_TI'.
            card_arch = card_r.findall(gpu.find('product_name').text)[0]
            if 'ti' in card_arch.lower():
                card_arch = '{}_{}'.format(decimal.findall(card)[0], 'TI')
            gpu_item = { 'bus': pcie_bus, 'card': card, 'card_arch': card_arch, 'fan_speed': fan_speed, 'gpu_util': gpu_util, 'memory_util': memory_util, 'temp': temp, 'temp_units': 'Celcius', 'power': power, 'power_units': 'Watts' }
            self.gpus.append(gpu_item)

    def _is_card_running(self):
        """Return True iff every GPU meets its utilization threshold; publish per-card alerts otherwise."""
        logging.debug('Checking GPU utilzation')
        all_running = True
        for gpu in self.gpus:
            config_util = os.getenv('GPU_UTIL_{}'.format(gpu['card_arch']), 80)
            if float(gpu['gpu_util']) < float(config_util):
                self.sns.publish('GPU {} is running {}% utilization, needs {}%. Verify it is still running.'.format(gpu['card'], gpu['gpu_util'], config_util))
                all_running = False
        if not all_running:
            # FIX: logging.warn is a deprecated alias of logging.warning
            logging.warning('Low GPU utilization detected')
        return all_running

    def _is_card_temp_ok(self):
        """Return True iff every GPU is at/below its temperature threshold; publish per-card alerts otherwise."""
        logging.debug('Checking GPU Temperatures')
        all_cool = True
        for gpu in self.gpus:
            config_temp = os.getenv('GPU_TEMP_{}'.format(gpu['card_arch']), 75)
            if float(gpu['temp']) > float(config_temp):
                self.sns.publish('GPU {} is too hot, running {} {}, needs {} {}.'.format(gpu['card'], gpu['temp'], gpu['temp_units'], config_temp, gpu['temp_units'] ))
                all_cool = False
        if not all_cool:
            logging.warning('High GPU temperature detected')
        return all_cool
| 36.465909
| 234
| 0.58741
|
acfd9add2f6458fa120c4b0f5546e985a8fef452
| 3,799
|
py
|
Python
|
peerselect/experiments/exp2.py
|
ansonkahng16/impartiality
|
074b0e7c9a4a906b46a2c550350f536247555064
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
peerselect/experiments/exp2.py
|
ansonkahng16/impartiality
|
074b0e7c9a4a906b46a2c550350f536247555064
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
peerselect/experiments/exp2.py
|
ansonkahng16/impartiality
|
074b0e7c9a4a906b46a2c550350f536247555064
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import math
import csv
import numpy as np
import random
# import matplotlib
# import matplotlib.pyplot as plt
import pickle
import re
# hacky path update to import peerselect
import sys
sys.path.insert(0, '/Users/akahng/Dropbox/RESEARCH/Procaccia/17-impartiality/peerselection-master/peerselect')
from peerselect import impartial_all as impartial
from peerselect import profile_generator
from peerselect import distance_helper as dist
'''
Super hacky; please make nicer later!
'''
# Mallows stuff here
n = 10     # number of agents (the agents double as the alternatives being ranked)
p = 0.2    # Mallows dispersion parameter
agents = np.arange(0,n)
profile = profile_generator.generate_mallows_mixture_profile(agents, agents, [1.0], [agents], [p])
cmps = []
# make this profile into a cmps matrix...
# Each row is [voter, preferred, dispreferred, 1] for every ordered pair in
# that voter's ranking.
for key in profile.keys():
    ranking = profile[key]
    n = len(ranking)    # NOTE: shadows the module-level n above (same value here)
    for a in range(n):
        for b in range(a+1, n):
            cmps.append([key, ranking[a], ranking[b], 1])
# load input file and set up all preliminaries
# NOTE(review): hard-coded absolute path — only works on the author's machine.
exp_cmps1 = None
with open('/Users/akahng/Dropbox/RESEARCH/Procaccia/17-impartiality/final-matrix-s2-for-d-and-a.csv', 'r') as infile:
    lines = infile.readlines()
    lines = [line.strip().split(",") for line in lines]
    # strip letter prefixes from the first three id columns, keep the vote as int
    lines = [[re.sub('[a-z]', '', line[0]), re.sub('[a-z]', '', line[1]), re.sub('[a-z]', '', line[2]), int(line[3])] for line in lines]
    exp_cmps1 = lines
k = 6    # committee / partition size used by the mechanisms
def overlap(a, b):
    """Return (count, fraction) of distinct elements of `a` also present in `b`.

    The fraction is relative to len(a) (the raw sequence length, duplicates
    included — matching the original semantics).

    FIX: an empty `a` previously raised ZeroDivisionError; it now returns
    (0, 0.0).
    """
    common = set(a).intersection(set(b))
    if not a:  # guard: nothing to overlap with
        return (0, 0.0)
    return (len(common), len(common) / len(a))
num_trials = 10    # randomized repetitions per mechanism (see repeat below)
def print_avgs(name, data):
    """Print a labelled one-line summary of the five distance metrics
    (KT, FR, MD, CY, HM) followed by a blank line."""
    print(name)
    print('KT: {0}, FR: {1}, MD: {2}, CY: {3}, HM: {4}'.format(*data[:5]))
    print('')
def repeat(cmps, k, num_trials, voting_rule):
    """Run ``num_trials`` trials of the three impartial mechanisms and report distances.

    For each trial, the committee, bipartite, and k-partite mechanisms are run on
    the pairwise-comparison list ``cmps`` (with committee/partition size ``k``),
    and each outcome is compared to the non-impartial reference solution of the
    chosen ``voting_rule`` under five distance measures (KT, FR, MD, CY, HM).

    Returns (comm_dists, bip_dists, kp_dists): three lists of per-trial 5-tuples.
    Exits the process on an unrecognized voting rule.
    """
    comm_dists = []
    bip_dists = []
    kp_dists = []
    # Reference ranking: run the plain (non-impartial) voting rule once.
    if voting_rule == impartial.kemeny:
        print('Using Kemeny')
        ref_sol = impartial.solve_kemeny(cmps)
    elif voting_rule == impartial.borda:
        print('Using Borda')
        ref_sol = impartial.solve_borda(cmps)
    else:
        sys.exit('Invalid voting rule.')
    # The mechanisms are randomized (random partitions), hence the repetition.
    for x in range(num_trials):
        comm_sol = impartial.committee_naive(cmps, k, voting_rule)
        bip_sol = impartial.bipartite(cmps, k, voting_rule)
        kp_sol = impartial.kpartite(cmps, k, voting_rule)
        # print(comm_sol)
        # print(bip_sol)
        # print(kp_sol)
        # print(ref_sol)
        comm_KT = dist.dKT(ref_sol, comm_sol)
        bip_KT = dist.dKT(ref_sol, bip_sol)
        kp_KT = dist.dKT(ref_sol, kp_sol)
        comm_FR = dist.dFR(ref_sol, comm_sol)
        bip_FR = dist.dFR(ref_sol, bip_sol)
        kp_FR = dist.dFR(ref_sol, kp_sol)
        comm_MD = dist.dMD(ref_sol, comm_sol)
        bip_MD = dist.dMD(ref_sol, bip_sol)
        kp_MD = dist.dMD(ref_sol, kp_sol)
        comm_CY = dist.dCY(ref_sol, comm_sol)
        bip_CY = dist.dCY(ref_sol, bip_sol)
        kp_CY = dist.dCY(ref_sol, kp_sol)
        comm_HM = dist.dHM(ref_sol, comm_sol)
        bip_HM = dist.dHM(ref_sol, bip_sol)
        kp_HM = dist.dHM(ref_sol, kp_sol)
        bip_dists.append((bip_KT, bip_FR, bip_MD, bip_CY, bip_HM))
        comm_dists.append((comm_KT, comm_FR, comm_MD, comm_CY, comm_HM))
        kp_dists.append((kp_KT, kp_FR, kp_MD, kp_CY, kp_HM))
    # comm_sol = impartial.committee_naive(cmps, k, impartial.borda)
    # bip_sol = impartial.bipartite(cmps, k, impartial.borda)
    # kp_sol = impartial.kpartite(cmps, k, impartial.borda)
    # Per-measure means over all trials (axis=0 averages each of the 5 columns).
    avg_comm_dists = list(np.mean(np.array(comm_dists), axis=0))
    avg_bip_dists = list(np.mean(np.array(bip_dists), axis=0))
    avg_kp_dists = list(np.mean(np.array(kp_dists), axis=0))
    print_avgs('comm', avg_comm_dists)
    print_avgs('bip', avg_bip_dists)
    print_avgs('kp', avg_kp_dists)
    # print(avg_comm_dists, avg_bip_dists, avg_kp_dists)
    print('comm: {0}, bip: {1}, kp: {2}'.format(comm_dists, bip_dists, kp_dists))
    return (comm_dists, bip_dists, kp_dists)
def main():
    """Run the experiment on the synthetic Mallows profile under both voting rules."""
    # repeat(exp_cmps1, k, num_trials)
    repeat(cmps, k, num_trials, impartial.kemeny)
    repeat(cmps, k, num_trials, impartial.borda)
if __name__ == '__main__':
    main()
| 28.780303
| 136
| 0.706238
|
acfd9be5b233282f0a7b39596b6a11839935a537
| 11,256
|
py
|
Python
|
src/cogent3/align/pairwise_pogs_numba.py
|
xingjianleng/cogent3
|
a85d08a948f6903e4e04eea8292f588cc0b4907e
|
[
"BSD-3-Clause"
] | null | null | null |
src/cogent3/align/pairwise_pogs_numba.py
|
xingjianleng/cogent3
|
a85d08a948f6903e4e04eea8292f588cc0b4907e
|
[
"BSD-3-Clause"
] | null | null | null |
src/cogent3/align/pairwise_pogs_numba.py
|
xingjianleng/cogent3
|
a85d08a948f6903e4e04eea8292f588cc0b4907e
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from numba import boolean, float64, int64, njit, optional, uint8
from numba.core.types.containers import Tuple
__author__ = "Peter Maxwell"
__copyright__ = "Copyright 2007-2022, The Cogent Project"
__credits__ = ["Peter Maxwell", "Gavin Huttley", "Stephen Ma"]
__license__ = "BSD-3"
__version__ = "2022.4.20a1"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Production"
@njit(
    Tuple(types=(Tuple(types=(int64, int64)), int64, float64,))(
        int64[::1],
        int64[::1],
        int64[::1],
        int64,
        int64,
        int64,
        int64,
        optional(int64[::1]),
        optional(int64[::1]),
        optional(int64[::1]),
        optional(int64[::1]),
        int64[:, ::1],
        float64[:, ::1],
        float64[:, ::1],
        float64[:, ::1],
        float64[:, :, ::1],
        optional(float64[:, :, ::1]),
        float64,
        optional(int64[:, :, ::1]),
        optional(uint8[:, :, ::1]),
        optional(int64[::1]),
        boolean,
        boolean,
        boolean,
        boolean,
    ),
    cache=True,
)
def calc_rows(
    plan,
    x_index,
    y_index,
    i_low,
    i_high,
    j_low,
    j_high,
    i_sources,
    i_sources_offsets,
    j_sources,
    j_sources_offsets,
    state_directions,
    T,
    xgap_scores,
    ygap_scores,
    match_scores,
    mantissas,
    mantissa,
    exponents,
    track,
    track_enc,
    viterbi,
    local=False,
    use_scaling=False,
    use_logs=False,
):
    """Fill the pairwise-alignment DP cells for rows [i_low, i_high), columns
    [j_low, j_high), and return the alignment endpoint and its score.

    Each cell/state update combines predecessor-cell values through the state
    transition matrix ``T`` and multiplies in an emission score chosen by the
    destination state's step class (dx, dy): match, x-gap, or y-gap.  Depending
    on the flags this is a Viterbi (max, with traceback into ``track``) or a
    forward (sum) recurrence, evaluated in plain probability space, log space
    (``use_logs``), or mantissa/exponent scaled arithmetic (``use_scaling``)
    to avoid floating-point underflow on long sequences.

    Returns ``((last_i, last_j), last_state, score)``; ``score`` is a log value
    except in the plain-``use_logs`` Viterbi case where it already is one.

    NOTE(review): ``i_sources``/``j_sources`` with their ``*_offsets`` arrays
    are presumably CSR-style predecessor lists for partial-order graphs
    (multiple predecessors per row/column) — confirm against the Pairwise
    caller.  ``mantissas``/``exponents``/``track`` are (row-buffer, column,
    state) work arrays; ``plan`` maps a logical row to its buffer slot so only
    a few rows are kept in memory.
    """
    # Flag combinations that the recurrence below cannot represent.
    assert not (use_logs and not viterbi)
    assert not (use_logs and use_scaling)
    assert not (local and not viterbi)
    # Scaled arithmetic keeps each value as mantissa * SCALE_STEP**exponent.
    MIN_SCALE = -10000
    MAX_SCALE = +10000
    SCALE_STEP = 2.0 ** 50
    MIN_FLOAT_VALUE = 1.0 / SCALE_STEP
    # Slot 0 holds the current row's buffer index; slots 1.. hold predecessors.
    source_row_index_cache = np.zeros(256)
    N = max(T.shape[0], T.shape[1])
    dest_states = max(0, state_directions.shape[0])
    row_count = x_index.shape[0]
    row_length = y_index.shape[0]
    max_x = match_scores.shape[1]
    max_y = match_scores.shape[2]
    max_x = max(xgap_scores.shape[1], max_x)
    max_y = max(ygap_scores.shape[1], max_y)
    # Bounds checks up front so the hot loops below can index unchecked.
    for i in range(row_count):
        assert 0 <= x_index[i] <= max_x
    for j in range(row_length):
        assert 0 <= y_index[j] <= max_y
    assert j_low >= 0 and j_high > j_low and j_high <= row_length
    row_length = max(mantissas.shape[1], row_length)
    N = max(mantissas.shape[2], N)
    if use_scaling:
        row_length = max(exponents.shape[1], row_length)
        N = max(exponents.shape[2], N)
    # "impossible" is the identity for the chosen combine operation.
    if use_logs:
        impossible = -np.inf
    else:
        impossible = 0.0
    if viterbi and track is not None and track_enc is not None:
        N = max(track.shape[2], N)
        # Bit offsets for packing (a, b, state) into one traceback integer.
        (tcode_x, tcode_y, tcode_s) = track_enc
    else:
        track = None
        tcode_x = tcode_y = tcode_s = 0
    # Best local-alignment endpoint seen so far (local mode only).
    overall_max_exponent = MIN_SCALE
    overall_max_mantissa = impossible
    last_i = last_j = last_state = -1
    max_exponent = MIN_SCALE
    for i in range(i_low, i_high):
        x = x_index[i]
        i_sources_start = i_sources_offsets[i]
        i_sources_end = i_sources_offsets[i + 1]
        current_row_index = plan[i]
        source_row_index_cache[0] = current_row_index
        a_count = i_sources_end - i_sources_start
        # Resolve each predecessor row to its buffer slot once per row.
        for a in range(a_count):
            prev_i = i_sources[a + i_sources_start]
            source_row_index_cache[a + 1] = plan[prev_i]
        # Seed cell (0, 0): probability 1 (or log 0) at the origin row,
        # impossible elsewhere.
        if i == 0:
            if use_logs:
                mantissas[current_row_index, 0, 0] = 0.0
            else:
                mantissas[current_row_index, 0, 0] = 1.0
            if use_scaling:
                exponents[current_row_index, 0, 0] = 0
        else:
            mantissas[current_row_index, 0, 0] = impossible
            if use_scaling:
                exponents[current_row_index, 0, 0] = MIN_SCALE
        j_sources_end = j_sources_offsets[j_low]
        for j in range(j_low, j_high):
            j_sources_start = j_sources_end
            j_sources_end = j_sources_offsets[j + 1]
            for dest_state in range(dest_states):
                state = state_directions[dest_state, 0]
                bin = state_directions[dest_state, 1]  # NOTE: shadows builtin bin()
                # dx/dy: does this state consume an x and/or y position?
                dx = state_directions[dest_state, 2]
                dy = state_directions[dest_state, 3]
                max_mantissa = impossible
                max_exponent = MIN_SCALE
                partial_sum = 0.0
                pointer_state = N
                # Predecessor index ranges: slot 0 is the current row/column
                # (no movement); slots >= 1 are the real predecessors.
                if dx:
                    a_low = 1
                    a_high = a_count + 1
                else:
                    a_low = 0
                    a_high = 1
                if dy:
                    b_low = 1
                    b_high = j_sources_end - j_sources_start + 1
                else:
                    b_low = 0
                    b_high = 1
                pointer_a = 0
                pointer_b = 0
                if use_scaling:
                    # Sum values that share max_exponent in partial_sum and the
                    # ones exactly one scale step smaller in sub_partial_sum;
                    # anything smaller is negligible at 2**50 per step.
                    sub_partial_sum = 0.0
                    for a in range(a_low, a_high):
                        source_row_index = int(source_row_index_cache[a])
                        for b in range(b_low, b_high):
                            if dy:
                                prev_j = j_sources[b - 1 + j_sources_start]
                            else:
                                prev_j = j
                            min_prev_state = prev_j > 0
                            for prev_state in range(min_prev_state, N):
                                exponent = exponents[
                                    source_row_index, prev_j, prev_state
                                ]
                                if exponent == MIN_SCALE:
                                    continue
                                transition = T[prev_state, state]
                                mantissa = mantissas[
                                    source_row_index, prev_j, prev_state
                                ]
                                mantissa *= transition
                                # Renormalize the mantissa into [MIN_FLOAT_VALUE, 1].
                                if mantissa < MIN_FLOAT_VALUE:
                                    if mantissa == 0.0:
                                        continue
                                    assert mantissa >= 0.0 and transition >= 0.0
                                    while mantissa < MIN_FLOAT_VALUE:
                                        mantissa *= SCALE_STEP
                                        exponent += -1
                                        assert exponent > MIN_SCALE
                                elif mantissa > 1.0:
                                    mantissa *= MIN_FLOAT_VALUE
                                    exponent += 1
                                    assert exponent <= MAX_SCALE
                                if exponent > max_exponent:
                                    # New dominant exponent: demote (or drop)
                                    # the sums accumulated so far.
                                    if exponent == max_exponent + 1:
                                        sub_partial_sum = partial_sum
                                    else:
                                        sub_partial_sum = 0.0
                                    partial_sum = 0.0
                                    max_mantissa = 0.0
                                    max_exponent = exponent
                                if exponent == max_exponent:
                                    partial_sum += mantissa
                                    if viterbi and mantissa > max_mantissa:
                                        max_mantissa = mantissa
                                        pointer_state = prev_state
                                        pointer_a = a
                                        pointer_b = b
                                elif exponent == max_exponent - 1:
                                    sub_partial_sum += mantissa
                    # Fold the one-step-smaller contributions back in.
                    partial_sum += sub_partial_sum * MIN_FLOAT_VALUE
                else:
                    # Unscaled path: plain (or log-space) combine.
                    for a in range(a_low, a_high):
                        source_row_index = int(source_row_index_cache[a])
                        for b in range(b_low, b_high):
                            if dy:
                                prev_j = j_sources[b - 1 + j_sources_start]
                            else:
                                prev_j = j
                            min_prev_state = prev_j > 0
                            for prev_state in range(min_prev_state, N):
                                mantissa = mantissas[
                                    source_row_index, prev_j, prev_state
                                ]
                                transition = T[prev_state, state]
                                if use_logs:
                                    mantissa += transition
                                else:
                                    mantissa *= transition
                                partial_sum += mantissa
                                if viterbi and mantissa > max_mantissa:
                                    max_mantissa = mantissa
                                    pointer_state = prev_state
                                    pointer_a = a
                                    pointer_b = b
                if viterbi:
                    mantissa = max_mantissa
                    if track is not None:
                        # Pack the winning predecessor into one traceback int.
                        track[i, j, state] = (
                            (pointer_a << tcode_x)
                            | (pointer_b << tcode_y)
                            | (pointer_state << tcode_s)
                        )
                else:
                    mantissa = partial_sum
                # Emission score for this cell, chosen by the step class.
                if dy:
                    y = y_index[j]
                    if dx:
                        d_score = match_scores[bin, x, y]
                    else:
                        d_score = ygap_scores[bin, y]
                elif dx:
                    d_score = xgap_scores[bin, x]
                elif use_logs:
                    d_score = 0.0
                else:
                    d_score = 1.0
                if use_logs:
                    mantissa += d_score
                else:
                    mantissa *= d_score
                mantissas[current_row_index, j, state] = mantissa
                if use_scaling:
                    exponents[current_row_index, j, state] = max_exponent
                # Local alignment may end at any match cell: track the best.
                if local and dx and dy:
                    if (use_scaling and max_exponent > overall_max_exponent) or (
                        (not use_scaling or max_exponent == overall_max_exponent)
                        and (mantissa > overall_max_mantissa)
                    ):
                        overall_max_exponent = max_exponent
                        overall_max_mantissa = mantissa
                        last_i = i
                        last_j = j
                        last_state = state
    # Global alignment ends at the last cell processed; local at the best cell.
    if not local:
        last_i = i_high - 1
        last_j = j_high - 1
        last_state = state
    else:
        mantissa = overall_max_mantissa
        max_exponent = overall_max_exponent
    # Convert the endpoint value to a log score.
    if use_scaling:
        score = np.log(mantissa) + np.log(SCALE_STEP) * max_exponent
    elif use_logs:
        score = mantissa
    else:
        score = np.log(mantissa)
    return ((last_i, last_j), last_state, score)
| 33.400593
| 81
| 0.445629
|
acfd9caff446d6bd74ee3d4a2a350c9ffeceb165
| 5,777
|
py
|
Python
|
webui/gendata.py
|
xuduo35/StyleFlowPytorch
|
4202f1c9b4dcfa355d3ab40b13900e2d969cb1b5
|
[
"MIT"
] | 12
|
2021-12-21T05:14:32.000Z
|
2022-03-07T09:35:58.000Z
|
webui/gendata.py
|
xuduo35/StyleFlowPytorch
|
4202f1c9b4dcfa355d3ab40b13900e2d969cb1b5
|
[
"MIT"
] | 2
|
2022-01-23T14:24:14.000Z
|
2022-02-07T05:37:44.000Z
|
webui/gendata.py
|
xuduo35/StyleFlowPytorch
|
4202f1c9b4dcfa355d3ab40b13900e2d969cb1b5
|
[
"MIT"
] | 1
|
2021-12-30T10:05:06.000Z
|
2021-12-30T10:05:06.000Z
|
import json
import os
import sys
import glob
import pickle
import random
import numpy as np
import cv2 as cv
import torch
from torchvision import transforms
from imageencoder import encoder_init, encode_real_images
from DPR import dpr_init, get_lightvec
from ffhq_dataset.face_alignment import image_align
from ffhq_dataset.landmarks_detector import LandmarksDetector
# dlib 68-point facial landmark model used to locate faces for alignment.
landmarks_model_path = "../mymodels/shape_predictor_68_face_landmarks.dat"
landmarks_detector = LandmarksDetector(landmarks_model_path)
def getface(imagepath):
    """Detect the first face in *imagepath* and return it aligned at 1024x1024.

    Returns None when no face landmarks are found in the image.
    """
    detections = list(landmarks_detector.get_landmarks(imagepath))
    if not detections:
        return None
    return image_align(imagepath, None, detections[0], output_size=1024)
name_list = ['beauty']
# Index -> label vocabularies for each categorical head of the attribute model.
expression_dict = {0: 'none', 1: 'smile', 2: 'laugh'}
face_shape_dict = {0: 'square', 1: 'oval', 2: 'heart', 3: 'round', 4: 'triangle'}
face_type_dict = {0: 'human', 1: 'cartoon'}
gender_dict = {0: 'female', 1: 'male'}
glasses_dict = {0: 'none', 1: 'sun', 2: 'common'}
race_dict = {0: 'yellow', 1: 'white', 2: 'black', 3: 'arabs'}
def idx2name(idx, tag):
    """Translate a class index into its label for the attribute ``tag``.

    Returns None for an unknown tag; raises KeyError for an out-of-range idx.
    """
    vocabularies = {
        'expression': expression_dict,
        'face_shape': face_shape_dict,
        'face_type': face_type_dict,
        'gender': gender_dict,
        'glasses': glasses_dict,
        'race': race_dict,
    }
    vocabulary = vocabularies.get(tag)
    return None if vocabulary is None else vocabulary[idx]
def name2idx(name):
    """Inverse lookup: map an attribute label back to its class index.

    Labels shared across vocabularies ('none') resolve to the same index in
    each, so a single flat table is unambiguous.  Raises KeyError for an
    unknown label.
    """
    table = {}
    for ordered_labels in (
        ('none', 'smile', 'laugh'),
        ('square', 'oval', 'heart', 'round', 'triangle'),
        ('human', 'cartoon'),
        ('female', 'male'),
        ('yellow', 'white', 'black', 'arabs'),
    ):
        for index, label in enumerate(ordered_labels):
            table[label] = index
    table['sun'] = 1
    table['common'] = 2
    return table[name]
if __name__ == "__main__":
    # Usage: python gendata.py <image_dir>
    # For each image: align the face, encode it to StyleGAN w+ latents,
    # estimate its SH lighting, and predict face attributes; save all three
    # arrays for the StyleFlow training pipeline.
    samples = glob.glob(sys.argv[1]+"/*.*")
    device = 'cuda'
    encoder, G, dlatent_avg = encoder_init(device)
    lightmodel = dpr_init(device)
    checkpoint = '../mymodels/face-attributes_scripted.pt'
    faceattr_model = torch.jit.load(checkpoint, map_location=device)
    faceattr_model = faceattr_model.to(device)
    faceattr_model.eval()
    faceattr_trans = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    # One row per input image; rows stay zero when no face is detected.
    dlatents_arr = np.zeros((len(samples),1,18,512), dtype=np.float32)
    faceattr_arr = np.zeros((len(samples),8,1), dtype=np.float32)
    lights = np.zeros((len(samples),1,9,1,1), dtype=np.float32)
    for i, full_path in enumerate(samples):
        print(full_path)
        faceimg = getface(full_path)
        if faceimg is None:
            continue
        faceimg = np.array(faceimg)
        # encode image
        dlatents = encode_real_images(
            device, G, encoder, dlatent_avg, faceimg, truncation_psi=0.5, num_steps=1
        )
        dlatents_arr[i,0,:,:] = dlatents
        out_light = get_lightvec(device, lightmodel, faceimg)
        lights[i,:,:,:,:] = out_light.detach().cpu().numpy()
        # xxx: accuracy of age and eyeglasses is very poor
        # BUGFIX: faceimg is a numpy array at this point, and
        # ndarray.resize() operates in place and returns None (and reshapes
        # rather than rescales), so `faceimg = faceimg.resize((224,224))`
        # left faceimg = None and crashed the transform below.  Rescale with
        # OpenCV instead; channel order is unchanged.
        faceimg = cv.resize(faceimg, (224, 224))
        faceimg = faceattr_trans(faceimg)
        inputs = torch.unsqueeze(faceimg, 0).float().to(device)
        with torch.no_grad():
            reg_out, expression_out, gender_out, glasses_out, race_out = faceattr_model(inputs)
        reg_out = reg_out.cpu().numpy()
        age_out = reg_out[:, 0]
        pitch_out = reg_out[:, 1]
        roll_out = reg_out[:, 2]
        yaw_out = reg_out[:, 3]
        beauty_out = reg_out[:, 4]
        # Take the top-1 class index from each categorical head.
        _, expression_out = expression_out.topk(1, 1, True, True)
        _, gender_out = gender_out.topk(1, 1, True, True)
        _, glasses_out = glasses_out.topk(1, 1, True, True)
        _, race_out = race_out.topk(1, 1, True, True)
        expression_out = expression_out.cpu().numpy()
        gender_out = gender_out.cpu().numpy()
        glasses_out = glasses_out.cpu().numpy()
        race_out = race_out.cpu().numpy()
        # Regression heads are normalized; rescale to human-readable units.
        age = int(age_out[0] * 100)
        pitch = float('{0:.2f}'.format(pitch_out[0] * 360 - 180))
        roll = float('{0:.2f}'.format(roll_out[0] * 360 - 180))
        yaw = float('{0:.2f}'.format(yaw_out[0] * 360 - 180))
        beauty = float('{0:.2f}'.format(beauty_out[0] * 100))
        expression = idx2name(int(expression_out[0][0]), 'expression')
        gender = idx2name(int(gender_out[0][0]), 'gender')
        glasses = idx2name(int(glasses_out[0][0]), 'glasses')
        race = idx2name(int(race_out[0][0]), 'race')
        # 23 18.3 1.65 33.47 none male none white
        print(age, pitch, yaw, beauty, expression, gender, glasses, race)
        # ['Gender', 'Glasses', 'Yaw', 'Pitch', 'Baldness', 'Beard', 'Age', 'Expression']
        faceattr_arr[i,0,0] = 0. if gender == 'female' else 1.
        faceattr_arr[i,1,0] = 0. if glasses == 'none' else 1.
        faceattr_arr[i,2,0] = max(-20,min(yaw,20))
        faceattr_arr[i,3,0] = max(-20,min(pitch,20))
        faceattr_arr[i,4,0] = 0.5  # Baldness: not predicted, neutral default
        faceattr_arr[i,5,0] = 0.5  # Beard: not predicted, neutral default
        # NOTE(review): age_out[0] is the normalized value (0..1), so the 65
        # cap never engages; `age` above is the rescaled value — confirm which
        # scale the training pipeline expects.
        faceattr_arr[i,6,0] = min(age_out[0],65)
        # BUGFIX: slot 7 is 'Expression' per the layout comment above, but the
        # old code stored the glasses flag a second time here.
        faceattr_arr[i,7,0] = 0. if expression == 'none' else 1.
    np.save("./data/dlatents.npy", dlatents_arr)
    np.save("./data/attributes.npy", faceattr_arr)
    np.save("./data/lights.npy", lights)
| 35.881988
| 95
| 0.59685
|
acfd9d6224c210eacaf302a6fe52d3f29ff0d6b1
| 7,522
|
py
|
Python
|
python/oneflow/test_utils/oneflow_pytorch_compatiblity/oneflow_pytorch_compatiblity_test.py
|
felixhao28/oneflow
|
e558af6ef6c4ed90e4abc7bc1ba895f55795626d
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test_utils/oneflow_pytorch_compatiblity/oneflow_pytorch_compatiblity_test.py
|
felixhao28/oneflow
|
e558af6ef6c4ed90e4abc7bc1ba895f55795626d
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test_utils/oneflow_pytorch_compatiblity/oneflow_pytorch_compatiblity_test.py
|
felixhao28/oneflow
|
e558af6ef6c4ed90e4abc7bc1ba895f55795626d
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import importlib.util
import unittest
import numpy as np
import time
import tempfile
import argparse
import oneflow as flow
import torch
import oneflow.unittest
import shutil
import matplotlib as mpl
mpl.use("Agg")  # non-interactive backend: must be selected before pyplot import (headless CI)
import matplotlib.pyplot as plt
# When set, print per-phase timing details and save a loss-comparison plot.
verbose = os.getenv("ONEFLOW_TEST_VERBOSE") is not None
def cos_sim(vector_a, vector_b):
    """Return the cosine similarity of two 1-D vectors, rescaled to [0, 1].

    1.0 means identical direction, 0.5 orthogonal, 0.0 opposite.  Uses plain
    arrays and ``np.dot`` instead of the deprecated ``np.mat``/``np.matrix``
    API the original relied on; results are identical for 1-D inputs.

    Note: undefined (division by zero -> nan) when either vector is all zeros,
    matching the original behavior.
    """
    a = np.asarray(vector_a, dtype=float).ravel()
    b = np.asarray(vector_b, dtype=float).ravel()
    num = float(np.dot(a, b))
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    cos = num / denom
    sim = 0.5 + 0.5 * cos
    return sim
def import_file(source):
    """Materialize *source* as a temporary .py file and import it.

    The module is loaded under the fixed name 'mod' and returned; the
    temporary file is removed when the context manager exits.
    """
    with tempfile.NamedTemporaryFile("w", suffix=".py") as tmp:
        tmp.write(source)
        tmp.flush()
        spec = importlib.util.spec_from_file_location("mod", tmp.name)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module
def get_loss(
    image_nd,
    label_nd,
    model_path: str,
    module_name: str,
    test_pytorch: bool = True,
    device: str = "cuda",
    tmpdirname: str = "/tmp",
):
    """Train the model for ``bp_iters`` steps and return the per-step losses.

    The model class ``module_name`` is loaded from the PyTorch source file at
    ``model_path``.  With ``test_pytorch=True`` the model is trained with
    PyTorch and its *initial* weights are saved in OneFlow format to
    ``tmpdirname``; with ``test_pytorch=False`` the same source is rewritten
    on the fly to import OneFlow under the torch names, and training starts
    from the weights saved by the earlier PyTorch run — so both frameworks
    optimize an identical initial model on identical data.

    image_nd/label_nd: numpy inputs (a fixed batch reused every iteration).
    Returns a list of ``bp_iters`` scalar loss values (numpy).
    """
    model_loss = []
    # Shared SGD hyperparameters for both frameworks.
    learning_rate = 0.01
    mom = 0.9
    bp_iters = 100
    for_time = 0.0
    bp_time = 0.0
    update_time = 0.0
    if test_pytorch == True:
        # NOTE(review): these three flow objects are shadowed by the torch
        # versions created below — apparently dead code.
        image = flow.tensor(image_nd)
        label = flow.tensor(label_nd)
        corss_entropy = flow.nn.CrossEntropyLoss(reduction="mean")  # (sic)
        with open(model_path) as f:
            buf = f.read()
        lines = buf.split("\n")
        buf = "\n".join(lines)
        python_module = import_file(buf)
        Net = getattr(python_module, module_name)
        pytorch_model = Net()
        # Save the freshly initialized weights in OneFlow format so the
        # OneFlow run can start from the very same parameters.
        w = pytorch_model.state_dict()
        new_parameters = dict()
        for k, v in w.items():
            if "num_batches_tracked" not in k:
                new_parameters[k] = flow.tensor(w[k].detach().numpy())
        flow.save(new_parameters, tmpdirname)
        pytorch_model.to(device)
        torch_sgd = torch.optim.SGD(
            pytorch_model.parameters(), lr=learning_rate, momentum=mom
        )
        image = torch.tensor(image_nd)
        image_gpu = image.to(device)
        corss_entropy = torch.nn.CrossEntropyLoss()
        corss_entropy.to(device)
        label = torch.tensor(label_nd, dtype=torch.long).to(device)
        print("start pytorch training loop....")
        start_t = time.time()
        for i in range(bp_iters):
            # Time forward, backward, and update phases separately.
            s_t = time.time()
            logits = pytorch_model(image_gpu)
            loss = corss_entropy(logits, label)
            for_time += time.time() - s_t
            s_t = time.time()
            loss.backward()
            bp_time += time.time() - s_t
            model_loss.append(loss.detach().cpu().numpy())
            s_t = time.time()
            torch_sgd.step()
            torch_sgd.zero_grad()
            update_time += time.time() - s_t
        end_t = time.time()
        if verbose:
            print(
                "pytorch traning loop avg time : {}".format(
                    (end_t - start_t) / bp_iters
                )
            )
            print("forward avg time : {}".format(for_time / bp_iters))
            print("backward avg time : {}".format(bp_time / bp_iters))
            print("update parameters avg time : {}".format(update_time / bp_iters))
    else:
        with open(model_path) as f:
            buf = f.read()
        lines = buf.split("\n")
        # Find the first real code line after the license header so the
        # OneFlow-as-torch import shims can be inserted just before it.
        for i, line in enumerate(lines):
            if (
                i > 15 and "import" not in line and len(line.strip()) != 0
            ):  # 15 means license
                break
        lines = (
            lines[:i]
            + [
                "import oneflow as torch",
                "import oneflow.nn as nn",
                "from oneflow import Tensor",
                "from oneflow.nn import Parameter",
            ]
            + lines[i:]
        )
        buf = "\n".join(lines)
        python_module = import_file(buf)
        Net = getattr(python_module, module_name)
        oneflow_model = Net()
        image = flow.tensor(image_nd)
        label = flow.tensor(label_nd)
        corss_entropy = flow.nn.CrossEntropyLoss(reduction="mean")  # (sic)
        image_gpu = image.to(device)
        label = label.to(device)
        oneflow_model.to(device)
        corss_entropy.to(device)
        # Start from the weights saved by the PyTorch run above.
        params = flow.load(tmpdirname)
        oneflow_model.load_state_dict(params)
        of_sgd = flow.optim.SGD(
            oneflow_model.parameters(), lr=learning_rate, momentum=mom
        )
        print("start oneflow training loop....")
        start_t = time.time()
        for i in range(bp_iters):
            s_t = time.time()
            logits = oneflow_model(image_gpu)
            loss = corss_entropy(logits, label)
            for_time += time.time() - s_t
            s_t = time.time()
            loss.backward()
            bp_time += time.time() - s_t
            model_loss.append(loss.numpy())
            s_t = time.time()
            of_sgd.step()
            of_sgd.zero_grad()
            update_time += time.time() - s_t
        end_t = time.time()
        if verbose:
            print(
                "oneflow traning loop avg time : {}".format(
                    (end_t - start_t) / bp_iters
                )
            )
            print("forward avg time : {}".format(for_time / bp_iters))
            print("backward avg time : {}".format(bp_time / bp_iters))
            print("update parameters avg time : {}".format(update_time / bp_iters))
    return model_loss
def do_test_train_loss_oneflow_pytorch(
    test_case, model_path: str, module_name: str, device: str = "cuda",
):
    """Assert that OneFlow and PyTorch produce near-identical loss curves.

    Trains the same model (same random batch, same initial weights — see
    get_loss) once under each framework, then requires the cosine similarity
    of the two loss curves to be ~1.0 within loose (1e-1) tolerances.
    ``test_case`` is the unittest.TestCase driving the assertion.
    """
    batch_size = 16
    # One fixed random batch; labels are just 0..batch_size-1.
    image_nd = np.random.rand(batch_size, 3, 224, 224).astype(np.float32)
    label_nd = np.array([e for e in range(batch_size)], dtype=np.int32)
    oneflow_model_loss = []
    pytorch_model_loss = []
    # The temp dir carries the initial weights from the PyTorch run to OneFlow.
    with tempfile.TemporaryDirectory() as tmpdirname:
        pytorch_model_loss = get_loss(
            image_nd, label_nd, model_path, module_name, True, "cuda", tmpdirname
        )
        oneflow_model_loss = get_loss(
            image_nd, label_nd, model_path, module_name, False, "cuda", tmpdirname
        )
    if verbose:
        indes = [i for i in range(len(oneflow_model_loss))]
        plt.plot(indes, oneflow_model_loss, label="oneflow")
        plt.plot(indes, pytorch_model_loss, label="pytorch")
        plt.xlabel("iter - axis")
        # Set the y axis label of the current axis.
        plt.ylabel("loss - axis")
        # Set a title of the current axes.
        plt.title("compare ")
        # show a legend on the plot
        plt.legend()
        # Display a figure.
        plt.savefig("./loss_compare.png")
        plt.show()
    test_case.assertTrue(
        np.allclose(cos_sim(oneflow_model_loss, pytorch_model_loss), 1.0, 1e-1, 1e-1)
    )
| 29.731225
| 85
| 0.580032
|
acfd9ecaf98fdd1695eb1fbe712f34d1a4f5482d
| 13,352
|
py
|
Python
|
addsvc/py-grpc/addsvc_pb2.py
|
moul/grpcbin-proto
|
bca18df4138cc423a9f8513f9c7d5f71f1ea4b35
|
[
"MIT"
] | null | null | null |
addsvc/py-grpc/addsvc_pb2.py
|
moul/grpcbin-proto
|
bca18df4138cc423a9f8513f9c7d5f71f1ea4b35
|
[
"MIT"
] | null | null | null |
addsvc/py-grpc/addsvc_pb2.py
|
moul/grpcbin-proto
|
bca18df4138cc423a9f8513f9c7d5f71f1ea4b35
|
[
"MIT"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: addsvc.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='addsvc.proto',
package='addsvc',
syntax='proto3',
serialized_pb=_b('\n\x0c\x61\x64\x64svc.proto\x12\x06\x61\x64\x64svc\"\"\n\nSumRequest\x12\t\n\x01\x61\x18\x01 \x01(\x03\x12\t\n\x01\x62\x18\x02 \x01(\x03\"\"\n\x08SumReply\x12\t\n\x01v\x18\x01 \x01(\x03\x12\x0b\n\x03\x65rr\x18\x02 \x01(\t\"%\n\rConcatRequest\x12\t\n\x01\x61\x18\x01 \x01(\t\x12\t\n\x01\x62\x18\x02 \x01(\t\"%\n\x0b\x43oncatReply\x12\t\n\x01v\x18\x01 \x01(\t\x12\x0b\n\x03\x65rr\x18\x02 \x01(\t2l\n\x03\x41\x64\x64\x12-\n\x03Sum\x12\x12.addsvc.SumRequest\x1a\x10.addsvc.SumReply\"\x00\x12\x36\n\x06\x43oncat\x12\x15.addsvc.ConcatRequest\x1a\x13.addsvc.ConcatReply\"\x00\x62\x06proto3')
)
_SUMREQUEST = _descriptor.Descriptor(
name='SumRequest',
full_name='addsvc.SumRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='a', full_name='addsvc.SumRequest.a', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='b', full_name='addsvc.SumRequest.b', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=24,
serialized_end=58,
)
_SUMREPLY = _descriptor.Descriptor(
name='SumReply',
full_name='addsvc.SumReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='v', full_name='addsvc.SumReply.v', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='err', full_name='addsvc.SumReply.err', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=94,
)
_CONCATREQUEST = _descriptor.Descriptor(
name='ConcatRequest',
full_name='addsvc.ConcatRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='a', full_name='addsvc.ConcatRequest.a', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='b', full_name='addsvc.ConcatRequest.b', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=96,
serialized_end=133,
)
_CONCATREPLY = _descriptor.Descriptor(
name='ConcatReply',
full_name='addsvc.ConcatReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='v', full_name='addsvc.ConcatReply.v', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='err', full_name='addsvc.ConcatReply.err', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=172,
)
DESCRIPTOR.message_types_by_name['SumRequest'] = _SUMREQUEST
DESCRIPTOR.message_types_by_name['SumReply'] = _SUMREPLY
DESCRIPTOR.message_types_by_name['ConcatRequest'] = _CONCATREQUEST
DESCRIPTOR.message_types_by_name['ConcatReply'] = _CONCATREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SumRequest = _reflection.GeneratedProtocolMessageType('SumRequest', (_message.Message,), dict(
DESCRIPTOR = _SUMREQUEST,
__module__ = 'addsvc_pb2'
# @@protoc_insertion_point(class_scope:addsvc.SumRequest)
))
_sym_db.RegisterMessage(SumRequest)
SumReply = _reflection.GeneratedProtocolMessageType('SumReply', (_message.Message,), dict(
DESCRIPTOR = _SUMREPLY,
__module__ = 'addsvc_pb2'
# @@protoc_insertion_point(class_scope:addsvc.SumReply)
))
_sym_db.RegisterMessage(SumReply)
ConcatRequest = _reflection.GeneratedProtocolMessageType('ConcatRequest', (_message.Message,), dict(
DESCRIPTOR = _CONCATREQUEST,
__module__ = 'addsvc_pb2'
# @@protoc_insertion_point(class_scope:addsvc.ConcatRequest)
))
_sym_db.RegisterMessage(ConcatRequest)
ConcatReply = _reflection.GeneratedProtocolMessageType('ConcatReply', (_message.Message,), dict(
DESCRIPTOR = _CONCATREPLY,
__module__ = 'addsvc_pb2'
# @@protoc_insertion_point(class_scope:addsvc.ConcatReply)
))
_sym_db.RegisterMessage(ConcatReply)
_ADD = _descriptor.ServiceDescriptor(
name='Add',
full_name='addsvc.Add',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=174,
serialized_end=282,
methods=[
_descriptor.MethodDescriptor(
name='Sum',
full_name='addsvc.Add.Sum',
index=0,
containing_service=None,
input_type=_SUMREQUEST,
output_type=_SUMREPLY,
options=None,
),
_descriptor.MethodDescriptor(
name='Concat',
full_name='addsvc.Add.Concat',
index=1,
containing_service=None,
input_type=_CONCATREQUEST,
output_type=_CONCATREPLY,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ADD)
DESCRIPTOR.services_by_name['Add'] = _ADD
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class AddStub(object):
"""The Add service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Sum = channel.unary_unary(
'/addsvc.Add/Sum',
request_serializer=SumRequest.SerializeToString,
response_deserializer=SumReply.FromString,
)
self.Concat = channel.unary_unary(
'/addsvc.Add/Concat',
request_serializer=ConcatRequest.SerializeToString,
response_deserializer=ConcatReply.FromString,
)
class AddServicer(object):
"""The Add service definition.
"""
def Sum(self, request, context):
"""Sums two integers.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Concat(self, request, context):
"""Concatenates two strings
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AddServicer_to_server(servicer, server):
rpc_method_handlers = {
'Sum': grpc.unary_unary_rpc_method_handler(
servicer.Sum,
request_deserializer=SumRequest.FromString,
response_serializer=SumReply.SerializeToString,
),
'Concat': grpc.unary_unary_rpc_method_handler(
servicer.Concat,
request_deserializer=ConcatRequest.FromString,
response_serializer=ConcatReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'addsvc.Add', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaAddServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""The Add service definition.
"""
def Sum(self, request, context):
"""Sums two integers.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Concat(self, request, context):
"""Concatenates two strings
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaAddStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""The Add service definition.
"""
def Sum(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Sums two integers.
"""
raise NotImplementedError()
Sum.future = None
def Concat(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Concatenates two strings
"""
raise NotImplementedError()
Concat.future = None
def beta_create_Add_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This function was
    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
    # Per-method codecs, keyed by (service, method) tuples as the beta
    # runtime expects.
    request_deserializers = {
        ('addsvc.Add', 'Concat'): ConcatRequest.FromString,
        ('addsvc.Add', 'Sum'): SumRequest.FromString,
    }
    response_serializers = {
        ('addsvc.Add', 'Concat'): ConcatReply.SerializeToString,
        ('addsvc.Add', 'Sum'): SumReply.SerializeToString,
    }
    method_implementations = {
        ('addsvc.Add', 'Concat'): face_utilities.unary_unary_inline(servicer.Concat),
        ('addsvc.Add', 'Sum'): face_utilities.unary_unary_inline(servicer.Sum),
    }
    server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
    return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Add_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This function was
    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
    request_serializers = {
        ('addsvc.Add', 'Concat'): ConcatRequest.SerializeToString,
        ('addsvc.Add', 'Sum'): SumRequest.SerializeToString,
    }
    response_deserializers = {
        ('addsvc.Add', 'Concat'): ConcatReply.FromString,
        ('addsvc.Add', 'Sum'): SumReply.FromString,
    }
    # Both RPCs are simple request/response (no streaming).
    cardinalities = {
        'Concat': cardinality.Cardinality.UNARY_UNARY,
        'Sum': cardinality.Cardinality.UNARY_UNARY,
    }
    stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
    return beta_implementations.dynamic_stub(channel, 'addsvc.Add', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| 33.463659
| 604
| 0.716222
|
acfd9f268221118e2a0ff6d5807be9933bc1271e
| 11,046
|
py
|
Python
|
airflow/providers/qubole/operators/qubole.py
|
bluecolor/airflow
|
d79e7221de76f01b5cd36c15224b59e8bb451c90
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/qubole/operators/qubole.py
|
bluecolor/airflow
|
d79e7221de76f01b5cd36c15224b59e8bb451c90
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/qubole/operators/qubole.py
|
bluecolor/airflow
|
d79e7221de76f01b5cd36c15224b59e8bb451c90
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Qubole operator"""
import re
from typing import Iterable
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.taskinstance import TaskInstance
from airflow.providers.qubole.hooks.qubole import (
COMMAND_ARGS, HYPHEN_ARGS, POSITIONAL_ARGS, QuboleHook, flatten_list,
)
from airflow.utils.decorators import apply_defaults
class QDSLink(BaseOperatorLink):
    """Operator extra link pointing at the QDS command-result page."""

    name = 'Go to QDS'

    def get_link(self, operator, dttm):
        """
        Get link to qubole command result page.

        :param operator: operator
        :param dttm: datetime
        :return: url link
        """
        task_instance = TaskInstance(task=operator, execution_date=dttm)
        conn_id = getattr(operator, "qubole_conn_id", None) or operator.kwargs['qubole_conn_id']
        conn = BaseHook.get_connection(conn_id)
        if conn and conn.host:
            base_url = re.sub(r'api$', 'v2/analyze?command_id=', conn.host)
        else:
            base_url = 'https://api.qubole.com/v2/analyze?command_id='
        command_id = task_instance.xcom_pull(task_ids=operator.task_id, key='qbol_cmd_id')
        return base_url + str(command_id) if command_id else ''
class QuboleOperator(BaseOperator):
    """
    Execute tasks (commands) on QDS (https://qubole.com).

    :param qubole_conn_id: Connection id which consists of qds auth_token
    :type qubole_conn_id: str

    kwargs:
        :command_type: type of command to be executed, e.g. hivecmd, shellcmd, hadoopcmd
        :tags: array of tags to be assigned with the command
        :cluster_label: cluster label on which the command will be executed
        :name: name to be given to command
        :notify: whether to send email on command completion or not (default is False)

        **Arguments specific to command types**

        hivecmd:
            :query: inline query statement
            :script_location: s3 location containing query statement
            :sample_size: size of sample in bytes on which to run query
            :macros: macro values which were used in query
            :sample_size: size of sample in bytes on which to run query
            :hive-version: Specifies the hive version to be used. eg: 0.13,1.2,etc.
        prestocmd:
            :query: inline query statement
            :script_location: s3 location containing query statement
            :macros: macro values which were used in query
        hadoopcmd:
            :sub_commnad: must be one these ["jar", "s3distcp", "streaming"] followed by
                1 or more args
        shellcmd:
            :script: inline command with args
            :script_location: s3 location containing query statement
            :files: list of files in s3 bucket as file1,file2 format. These files will be
                copied into the working directory where the qubole command is being
                executed.
            :archives: list of archives in s3 bucket as archive1,archive2 format. These
                will be unarchived into the working directory where the qubole command is
                being executed
            :parameters: any extra args which need to be passed to script (only when
                script_location is supplied)
        pigcmd:
            :script: inline query statement (latin_statements)
            :script_location: s3 location containing pig query
            :parameters: any extra args which need to be passed to script (only when
                script_location is supplied
        sparkcmd:
            :program: the complete Spark Program in Scala, R, or Python
            :cmdline: spark-submit command line, all required information must be specify
                in cmdline itself.
            :sql: inline sql query
            :script_location: s3 location containing query statement
            :language: language of the program, Scala, R, or Python
            :app_id: ID of an Spark job server app
            :arguments: spark-submit command line arguments
            :user_program_arguments: arguments that the user program takes in
            :macros: macro values which were used in query
            :note_id: Id of the Notebook to run
        dbtapquerycmd:
            :db_tap_id: data store ID of the target database, in Qubole.
            :query: inline query statement
            :macros: macro values which were used in query
        dbexportcmd:
            :mode: Can be 1 for Hive export or 2 for HDFS/S3 export
            :schema: Db schema name assumed accordingly by database if not specified
            :hive_table: Name of the hive table
            :partition_spec: partition specification for Hive table.
            :dbtap_id: data store ID of the target database, in Qubole.
            :db_table: name of the db table
            :db_update_mode: allowinsert or updateonly
            :db_update_keys: columns used to determine the uniqueness of rows
            :export_dir: HDFS/S3 location from which data will be exported.
            :fields_terminated_by: hex of the char used as column separator in the dataset
            :use_customer_cluster: To use cluster to run command
            :customer_cluster_label: the label of the cluster to run the command on
            :additional_options: Additional Sqoop options which are needed enclose options in
                double or single quotes e.g. '--map-column-hive id=int,data=string'
        dbimportcmd:
            :mode: 1 (simple), 2 (advance)
            :hive_table: Name of the hive table
            :schema: Db schema name assumed accordingly by database if not specified
            :hive_serde: Output format of the Hive Table
            :dbtap_id: data store ID of the target database, in Qubole.
            :db_table: name of the db table
            :where_clause: where clause, if any
            :parallelism: number of parallel db connections to use for extracting data
            :extract_query: SQL query to extract data from db. $CONDITIONS must be part
                of the where clause.
            :boundary_query: Query to be used get range of row IDs to be extracted
            :split_column: Column used as row ID to split data into ranges (mode 2)
            :use_customer_cluster: To use cluster to run command
            :customer_cluster_label: the label of the cluster to run the command on
            :additional_options: Additional Sqoop options which are needed enclose options in
                double or single quotes

    .. note:
        Following fields are template-supported : ``query``, ``script_location``,
        ``sub_command``, ``script``, ``files``, ``archives``, ``program``, ``cmdline``,
        ``sql``, ``where_clause``, ``extract_query``, ``boundary_query``, ``macros``,
        ``tags``, ``name``, ``parameters``, ``dbtap_id``, ``hive_table``, ``db_table``,
        ``split_column``, ``note_id``, ``db_update_keys``, ``export_dir``,
        ``partition_spec``, ``qubole_conn_id``, ``arguments``, ``user_program_arguments``.
        You can also use ``.txt`` files for template driven use cases.

    .. note:
        In QuboleOperator there is a default handler for task failures and retries,
        which generally kills the command running at QDS for the corresponding task
        instance. You can override this behavior by providing your own failure and retry
        handler in task definition.
    """

    template_fields: Iterable[str] = (
        'query', 'script_location', 'sub_command', 'script', 'files',
        'archives', 'program', 'cmdline', 'sql', 'where_clause', 'tags',
        'extract_query', 'boundary_query', 'macros', 'name', 'parameters',
        'dbtap_id', 'hive_table', 'db_table', 'split_column', 'note_id',
        'db_update_keys', 'export_dir', 'partition_spec', 'qubole_conn_id',
        'arguments', 'user_program_arguments', 'cluster_label'
    )
    template_ext: Iterable[str] = ('.txt',)
    ui_color = '#3064A1'
    ui_fgcolor = '#fff'
    # kwargs with these names belong to QuboleHook, not to BaseOperator.
    qubole_hook_allowed_args_list = ['command_type', 'qubole_conn_id', 'fetch_logs']

    operator_extra_links = (
        QDSLink(),
    )

    @apply_defaults
    def __init__(self, qubole_conn_id="qubole_default", **kwargs):
        # Keep *all* kwargs: QuboleHook is (re)built from them, and the
        # __getattribute__/__setattr__ overrides below route template fields
        # through this dict so Jinja-rendered values reach the hook.
        self.kwargs = kwargs
        self.kwargs['qubole_conn_id'] = qubole_conn_id
        self.hook = None
        # Only forward the kwargs BaseOperator actually recognizes.
        filtered_base_kwargs = self._get_filtered_args(kwargs)
        super().__init__(**filtered_base_kwargs)
        # Default failure/retry handlers kill the running QDS command.
        if self.on_failure_callback is None:
            self.on_failure_callback = QuboleHook.handle_failure_retry
        if self.on_retry_callback is None:
            self.on_retry_callback = QuboleHook.handle_failure_retry

    def _get_filtered_args(self, all_kwargs):
        """Return the kwargs NOT consumed by QuboleHook / the qds-sdk."""
        qubole_args = flatten_list(COMMAND_ARGS.values()) + HYPHEN_ARGS + \
            flatten_list(POSITIONAL_ARGS.values()) + self.qubole_hook_allowed_args_list
        return {key: value for key, value in all_kwargs.items() if key not in qubole_args}

    def execute(self, context):
        """Run the configured QDS command via a freshly built hook."""
        return self.get_hook().execute(context)

    def on_kill(self, ti=None):
        """Kill the QDS command associated with this task instance."""
        if self.hook:
            self.hook.kill(ti)
        else:
            self.get_hook().kill(ti)

    def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
        """get_results from Qubole"""
        return self.get_hook().get_results(ti, fp, inline, delim, fetch)

    def get_log(self, ti):
        """get_log from Qubole"""
        return self.get_hook().get_log(ti)

    def get_jobs_id(self, ti):
        """get jobs_id from Qubole"""
        return self.get_hook().get_jobs_id(ti)

    def get_hook(self):
        """Reinitialising the hook, as some template fields might have changed"""
        return QuboleHook(**self.kwargs)

    def __getattribute__(self, name):
        # Template fields live in self.kwargs; unset ones read as '' so
        # templating never raises AttributeError.
        if name in QuboleOperator.template_fields:
            if name in self.kwargs:
                return self.kwargs[name]
            else:
                return ''
        else:
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        # Mirror of __getattribute__: writes to template fields go to kwargs.
        if name in QuboleOperator.template_fields:
            self.kwargs[name] = value
        else:
            object.__setattr__(self, name, value)
| 45.270492
| 93
| 0.653268
|
acfda028f51726f64ed57b4aa1935bde40aefff2
| 7,037
|
py
|
Python
|
src/ggrc/migrations/versions/20181029161743_005108819b75_migrate_cycle_task_entry_to_comments.py
|
j0gurt/ggrc-core
|
84662dc85aa8864c907eabe70b8efccf92298a1f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-01-04T10:55:14.000Z
|
2019-01-04T10:55:14.000Z
|
src/ggrc/migrations/versions/20181029161743_005108819b75_migrate_cycle_task_entry_to_comments.py
|
j0gurt/ggrc-core
|
84662dc85aa8864c907eabe70b8efccf92298a1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/migrations/versions/20181029161743_005108819b75_migrate_cycle_task_entry_to_comments.py
|
j0gurt/ggrc-core
|
84662dc85aa8864c907eabe70b8efccf92298a1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Migrate cycle_task_entry to comments
Create Date: 2018-10-29 16:17:43.979116
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
from sqlalchemy import text
from ggrc.migrations import utils
from ggrc.migrations.utils import migrator
revision = '005108819b75'
down_revision = '871aaab0de41'
def load_data(conn):
    """Load all necessary data for migration.

    Returns one row per CycleTaskEntry: the entry's own fields, the id of
    the task it belongs to, the comma-separated ACL role names its author
    holds on that task (empty string if none), and the id of the old
    relationship row linking entry and task (in either direction).
    """
    sql = """
      SELECT
          cte.id AS cte_id,
          cte.description AS cte_description,
          cte.created_at AS cte_created_at,
          cte.modified_by_id AS cte_modified_by_id,
          cte.updated_at AS cte_updated_at,
          cte.cycle_task_group_object_task_id AS cgot_id,
          IFNULL(GROUP_CONCAT(acl_acr.name SEPARATOR ','),'') AS assignee_type,
          rel_union.id as old_rel_id
      FROM
          cycle_task_entries cte
      JOIN(
          SELECT id, source_id as cgot_id, destination_id as cte_id
          FROM relationships
          WHERE
              source_type='CycleTaskGroupObjectTask' AND
              destination_type='CycleTaskEntry'
          UNION ALL
          SELECT id, source_id as cte_id, destination_id as cgot_id
          FROM relationships
          WHERE
              source_type='CycleTaskEntry' AND
              destination_type = 'CycleTaskGroupObjectTask'
      ) as rel_union
          ON
              rel_union.cgot_id=cte.cycle_task_group_object_task_id AND
              rel_union.cte_id=cte.id
      LEFT OUTER JOIN(
          SELECT
              acl.id,
              acr.name,
              acl.object_type,
              acl.object_id,
              acp.person_id
          FROM
              access_control_list acl
          JOIN access_control_roles acr ON acr.id = acl.ac_role_id
          JOIN access_control_people acp ON acl.id = acp.ac_list_id
          WHERE
              acl.parent_id IS NULL AND
              acl.object_type = 'CycleTaskGroupObjectTask'
      )AS acl_acr
          ON
              acl_acr.object_id = cte.cycle_task_group_object_task_id AND
              acl_acr.person_id = cte.modified_by_id
      GROUP BY cte.id
    """
    return conn.execute(text(sql)).fetchall()
def create_comment(conn, data):
    """Create a new Comment from an old CycleTaskEntry row.

    :param conn: database connection.
    :param data: a row returned by :func:`load_data`.
    :return: id of the inserted comment.
    """
    sql = """
      INSERT INTO comments(
          description,
          created_at,
          modified_by_id,
          updated_at,
          assignee_type
      )
      VALUES (
          :description, :created_at, :modified_by_id,
          :updated_at, :assignee_type
      )
    """
    conn.execute(
        text(sql),
        description=data.cte_description,
        created_at=data.cte_created_at,
        modified_by_id=data.cte_modified_by_id,
        updated_at=data.cte_updated_at,
        assignee_type=data.assignee_type
    )
    comment_id = utils.last_insert_id(conn)
    # Queue the new comment for revision creation by the follow-up job.
    utils.add_to_objects_without_revisions(conn, comment_id, "Comment")
    return comment_id
def add_admin_acl(conn, comment_id, user_id, comment_admin_acr_id):
    """Create Comment Admin ACL user_id -> CycleTaskEntry.modified_by_id"""
    sql = """
      INSERT INTO access_control_list(
          ac_role_id,
          object_id,
          object_type,
          created_at,
          updated_at,
          modified_by_id,
          parent_id_nn
      )VALUES(
          :ac_role_id,
          :object_id,
          "Comment",
          NOW(),
          NOW(),
          :modified_by_id,
          0
      )
    """
    conn.execute(
        text(sql),
        ac_role_id=comment_admin_acr_id,
        object_id=comment_id,
        modified_by_id=user_id,
    )
    acl_id = utils.last_insert_id(conn)
    utils.add_to_objects_without_revisions(conn, acl_id, "AccessControlList")
    # Attach the comment author to the freshly created ACL row.
    create_acp(conn, user_id, acl_id)
def create_acp(conn, person_id, ac_list_id):
    """Create an access_control_people entry linking a person to an ACL row."""
    sql = """
      INSERT INTO access_control_people(
          person_id,
          ac_list_id,
          updated_at,
          created_at
      )VALUES(
          :person_id,
          :ac_list_id,
          NOW(),
          NOW()
      )
    """
    conn.execute(
        text(sql),
        person_id=person_id,
        ac_list_id=ac_list_id
    )
def create_relationship(conn, comment_id, cgot_id, migrator_id):
    """Create relationship between new Comment and CycleTaskGroupObjectTask"""
    sql = """
      INSERT INTO relationships(
          modified_by_id,
          created_at,
          updated_at,
          source_id,
          source_type,
          destination_id,
          destination_type
      ) VALUES (
          :modified_by_id,
          NOW(),
          NOW(),
          :source_id,
          :source_type,
          :destination_id,
          :destination_type
      )
    """
    conn.execute(
        text(sql),
        modified_by_id=migrator_id,
        source_id=comment_id,
        source_type="Comment",
        destination_id=cgot_id,
        destination_type="CycleTaskGroupObjectTask"
    )
    rel_id = utils.last_insert_id(conn)
    # Queue the new relationship for revision creation.
    utils.add_to_objects_without_revisions(conn, rel_id, "Relationship")
def remove_old_relationship(conn, old_comment_data):
    """Delete the old CycleTaskEntry<->task relationship rows."""
    ids_to_drop = [record.old_rel_id for record in old_comment_data]
    # Guard clause: "IN ()" would be invalid SQL for an empty id list.
    if not ids_to_drop:
        return
    conn.execute(
        text("DELETE FROM relationships WHERE id IN :rel_ids"),
        rel_ids=ids_to_drop
    )
def remove_old_rel_revisions(conn, old_comment_data):
    """Remove old relationship revisions."""
    old_rel_ids = [d.old_rel_id for d in old_comment_data]
    # Guard: "IN ()" would be invalid SQL for an empty id list.
    if old_rel_ids:
        conn.execute(
            text("""
                DELETE FROM revisions
                WHERE resource_type = :rel_type AND resource_id IN :rel_ids
            """),
            rel_type="Relationship",
            rel_ids=old_rel_ids
        )
def remove_cycle_task_entries(conn, old_comment_data):
    """Drop the migrated CycleTaskEntry rows and record their deletion."""
    entry_ids = [record.cte_id for record in old_comment_data]
    if not entry_ids:
        return
    conn.execute(
        text("DELETE FROM cycle_task_entries WHERE id IN :cte_ids"),
        cte_ids=entry_ids
    )
    utils.add_to_objects_without_revisions_bulk(
        conn, entry_ids, "CycleTaskEntry", "deleted"
    )
def get_comment_admin_role_id(conn):
    """Return Comment Admin role id"""
    sql = """
      SELECT id FROM access_control_roles
      WHERE object_type='Comment' AND name='Admin'
    """
    return conn.execute(text(sql)).fetchone().id
def run_data_migration():
    """Migration runner: move every CycleTaskEntry into a Comment."""
    conn = op.get_bind()
    migrator_id = migrator.get_migration_user_id(conn)
    entries = load_data(conn)
    admin_role_id = get_comment_admin_role_id(conn)
    for entry in entries:
        new_comment_id = create_comment(conn, entry)
        add_admin_acl(
            conn, new_comment_id, entry.cte_modified_by_id, admin_role_id
        )
        create_relationship(conn, new_comment_id, entry.cgot_id, migrator_id)
    # Only after all comments exist do we tear down the old structures.
    remove_old_relationship(conn, entries)
    remove_old_rel_revisions(conn, entries)
    remove_cycle_task_entries(conn, entries)
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # Purely a data migration; no schema changes are made here.
    run_data_migration()
def downgrade():
    """Downgrade database schema and/or data back to the previous revision.

    This migration deletes the source CycleTaskEntry rows, so it cannot be
    reversed.

    :raises NotImplementedError: always. NotImplementedError is the idiomatic
        signal for "deliberately unsupported" and, being an Exception
        subclass, remains compatible with callers catching Exception.
    """
    raise NotImplementedError("Downgrade is not supported.")
| 26.961686
| 79
| 0.672304
|
acfda02ef920ef7e00fb735274462c21d39540f1
| 1,505
|
py
|
Python
|
archub/commands/task.py
|
rfldong/archub
|
b5bcc67862e37a132e144d8a6cd5e7417a220b24
|
[
"Apache-2.0"
] | 1
|
2018-04-20T19:11:29.000Z
|
2018-04-20T19:11:29.000Z
|
archub/commands/task.py
|
rfldong/archub
|
b5bcc67862e37a132e144d8a6cd5e7417a220b24
|
[
"Apache-2.0"
] | 34
|
2018-03-19T11:48:37.000Z
|
2019-03-21T15:55:33.000Z
|
archub/commands/task.py
|
rfldong/archub
|
b5bcc67862e37a132e144d8a6cd5e7417a220b24
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
from argparse import ArgumentParser
from archub import cmdline, config
from github import Github
from archub.core import print_issues, print_issues_with_labels
def main(args):
    """List assigned GitHub issues, optionally scoped to the configured repo."""
    repo_scoped = config.GITHUB_REPOSITORY_NAME is not None
    if repo_scoped:
        description = 'List assigned issues for {}/{}'\
            .format(config.GITHUB_ORGANIZATION, config.GITHUB_REPOSITORY_NAME)
        all_help = ('show all assigned issues, not just issues for {}/{} '
                    '[default: False]').format(config.GITHUB_ORGANIZATION,
                                               config.GITHUB_REPOSITORY_NAME)
    else:
        description = 'List all assigned issues'
        all_help = 'show all assigned issues [default: True]'
    parser = ArgumentParser(prog=cmdline.prog(__file__), description=description)
    parser.add_argument('-l', '--labels', default=False, action='store_true',
                        help='include labels in output')
    parser.add_argument('-a', '--all', default=not repo_scoped,
                        action='store_true', help=all_help)
    parser.add_argument('-w', '--wide', default=False, action='store_true',
                        help='use the full width of the terminal')
    opts = parser.parse_args(args)
    width = config.TTY_COLS if opts.wide else 80
    if opts.labels:
        print_issues_with_labels(opts.all, linewidth=width)
    else:
        print_issues(opts.all, linewidth=width)
    return 0
if __name__ == '__main__':
    # Pass sys.argv[1:] — main() hands its argument straight to
    # ArgumentParser.parse_args(), which expects only the option strings;
    # including argv[0] (the script path) would make parsing fail with
    # "unrecognized arguments".
    sys.exit(main(sys.argv[1:]))
| 38.589744
| 83
| 0.681063
|
acfda0c5d87e4de296a480b685435bbf7604f2e6
| 519
|
py
|
Python
|
examples/quick_start/minimize_08.py
|
petuum/tuun
|
8eec472dbf0e5e695449b0fa2d98985469fd5b30
|
[
"Apache-2.0"
] | 33
|
2020-08-30T16:22:35.000Z
|
2022-02-26T13:48:32.000Z
|
examples/quick_start/minimize_08.py
|
petuum/tuun
|
8eec472dbf0e5e695449b0fa2d98985469fd5b30
|
[
"Apache-2.0"
] | 2
|
2021-01-18T19:46:43.000Z
|
2021-03-24T09:59:14.000Z
|
examples/quick_start/minimize_08.py
|
petuum/tuun
|
8eec472dbf0e5e695449b0fa2d98985469fd5b30
|
[
"Apache-2.0"
] | 2
|
2020-08-25T17:02:15.000Z
|
2021-04-21T16:40:44.000Z
|
from tuun.main import Tuun

# instantiate Tuun
config = {
    'seed': 12,
    # UCB acquisition function with package defaults for everything else.
    'acqfunction_config': {'name': 'default', 'acq_str': 'ucb'},
    # Normalize real-valued dimensions before modeling.
    'probo_config': {'normalize_real': True},
}
tu = Tuun(config)

# set search space: two real-valued dimensions with different ranges
search_space = [('real', [-20, 20]), ('real', [-100, 100])]
tu.set_config_from_list(search_space)

# define function to optimize: a separable double-well applied per coordinate
def f(x):
    f_s = lambda x: x ** 4 - x ** 2 + 0.1 * x
    return f_s(x[0]) + f_s(x[1])

# minimize function over search space with a budget of 50 evaluations
result = tu.minimize_function(f, 50)
| 23.590909
| 64
| 0.633911
|
acfda27a543c2e871d059066752131fa0fdfb4ea
| 773
|
py
|
Python
|
renzongxian/0004/0004.py
|
saurabh896/python-1
|
f8d3aedf4c0fe6e24dfa3269ea7e642c9f7dd9b7
|
[
"MIT"
] | 3,976
|
2015-01-01T15:49:39.000Z
|
2022-03-31T03:47:56.000Z
|
renzongxian/0004/0004.py
|
oyesam7/python-1
|
220734af09fa09a6f615d4f1b4612a0ab75d91d1
|
[
"MIT"
] | 97
|
2015-01-11T02:59:46.000Z
|
2022-03-16T14:01:56.000Z
|
renzongxian/0004/0004.py
|
oyesam7/python-1
|
220734af09fa09a6f615d4f1b4612a0ab75d91d1
|
[
"MIT"
] | 3,533
|
2015-01-01T06:19:30.000Z
|
2022-03-28T13:14:54.000Z
|
# Source:https://github.com/Show-Me-the-Code/show-me-the-code
# Author:renzongxian
# Date:2014-12-07
# Python 3.4
"""
第 0004 题:任一个英文的纯文本文件,统计其中的单词出现的个数。
"""
import sys
def word_count(file_path):
    """Return the total number of whitespace-separated words in a text file.

    :param file_path: path of the text file to scan.
    :return: total word count across all lines.
    :raises IOError: if the file cannot be opened.
    """
    # 'with' guarantees the file is closed even if reading raises, unlike
    # the original open()/close() pair; streaming line by line keeps memory
    # flat for large files.
    with open(file_path, 'r') as file_object:
        return sum(len(line.split()) for line in file_object)
if __name__ == "__main__":
    if len(sys.argv) <= 1:
        # Fixed usage hint: the original said "$image_path" (a copy/paste
        # leftover from another exercise) although this script reads text
        # files; also dropped the redundant trailing `pass`.
        print("Need at least 1 parameter. Try to execute 'python 0004.py $file_path'")
    else:
        for infile in sys.argv[1:]:
            try:
                print("The total number of words is ", word_count(infile))
            except IOError:
                print("Can't open file!")
| 20.891892
| 87
| 0.595084
|
acfda2b561a5a0e57a3e190664ab3e2a3845ea04
| 61
|
py
|
Python
|
src/utils/__init__.py
|
Ursidours/pythonic_interviews
|
a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045
|
[
"MIT"
] | 2
|
2021-11-13T01:30:25.000Z
|
2022-02-11T18:17:22.000Z
|
src/utils/__init__.py
|
arnaudblois/pythonic_interviews
|
a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045
|
[
"MIT"
] | null | null | null |
src/utils/__init__.py
|
arnaudblois/pythonic_interviews
|
a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045
|
[
"MIT"
] | null | null | null |
"""
Package containing useful functions, decorators, etc
"""
| 15.25
| 52
| 0.737705
|
acfda3e4c6881e50a15a8e2ba143f7802e04ef53
| 137
|
py
|
Python
|
ibm_mq/datadog_checks/ibm_mq/__about__.py
|
tzach/integrations-core
|
ac9daf60630bea4739947fe1d8df72c20bfcbc22
|
[
"BSD-3-Clause"
] | null | null | null |
ibm_mq/datadog_checks/ibm_mq/__about__.py
|
tzach/integrations-core
|
ac9daf60630bea4739947fe1d8df72c20bfcbc22
|
[
"BSD-3-Clause"
] | null | null | null |
ibm_mq/datadog_checks/ibm_mq/__about__.py
|
tzach/integrations-core
|
ac9daf60630bea4739947fe1d8df72c20bfcbc22
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = '3.3.1'
| 27.4
| 59
| 0.722628
|
acfda441f79d5b98534fa7c7bec9a260355d8dbb
| 3,696
|
py
|
Python
|
test_iu.py
|
doheelab/stylegan2
|
76964cc73b8f05af1747f103107d0a3e278a8c5b
|
[
"BSD-Source-Code"
] | null | null | null |
test_iu.py
|
doheelab/stylegan2
|
76964cc73b8f05af1747f103107d0a3e278a8c5b
|
[
"BSD-Source-Code"
] | null | null | null |
test_iu.py
|
doheelab/stylegan2
|
76964cc73b8f05af1747f103107d0a3e278a8c5b
|
[
"BSD-Source-Code"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Toonify yourself의 빵형 ("Bbanghyung's Toonify yourself" tutorial)
Automatically generated by Colaboratory.
Original file is located at
    https://colab.research.google.com/drive/1WyUoMjpiTyOWt52KgwCBhzY1itDhUgvF
# Toonify yourself!
Please ensure that you're using a GPU runtime
First some setup:
"""

# NOTE(review): lines starting with "!" are Colab/IPython shell escapes;
# this file only runs inside a notebook environment, not as plain Python.

# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 1.x

# Commented out IPython magic to ensure Python compatibility.
# !git clone https://github.com/justinpinkney/stylegan2
# %cd stylegan2

import os

# Sanity-check that the CUDA toolchain works on this runtime.
!nvcc test_nvcc.cu -o test_nvcc -run

# Working directories: raw uploads, aligned crops, projection outputs.
!mkdir raw
!mkdir aligned
!mkdir generated

"""## Upload your own photos
Upload your photos to `raw/`. These don't need to be aligned as we'll use a face detector to grab all the faces and transform them into the correct format. One note of caution is that you'll need a pretty high-resolution picture of a face to get a sharp result (the final face crop is resized to 1024x1024 pixels)
We'll grab a example image from the internet to work with.
The basic process is:
- Extract faces and align the images
- Project the images (i.e. find the latent code)
- Toonify the images (i.e. use the latent code with the toon model)
Results will be placed in the stylegan2/generated folder
"""

# Fetch example portraits into raw/.
!wget https://upload.wikimedia.org/wikipedia/commons/6/6d/Shinz%C5%8D_Abe_Official.jpg -O raw/example.jpg
!wget https://upload.wikimedia.org/wikipedia/commons/9/95/191215_TVN_%EC%A6%90%EA%B1%B0%EC%9B%80%EC%A0%84_%ED%98%B8%ED%85%94%EB%8D%B8%EB%A3%A8%EB%82%98_%ED%86%A0%ED%81%AC%EC%84%B8%EC%85%98_%EC%95%84%EC%9D%B4%EC%9C%A0_%286%29.jpg -O raw/iu_01.jpg
!wget https://image.bugsm.co.kr/artist/images/1000/800491/80049126.jpg -O raw/iu_02.jpg

"""# Load Pretrained Models"""

import pretrained_networks

# use my copy of the blended model to save Doron's download bandwidth
# get the original here https://mega.nz/folder/OtllzJwa#C947mCCdEfMCRTWnDcs4qw
blended_url = "https://drive.google.com/uc?id=1H73TfV5gQ9ot7slSed_l-lim9X7pMRiU"
ffhq_url = "http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-f.pkl"

# Gs_blended is the toonified generator; Gs is the plain FFHQ generator
# used for projection.
_, _, Gs_blended = pretrained_networks.load_networks(blended_url)
_, _, Gs = pretrained_networks.load_networks(ffhq_url)

"""# Align Faces"""

!python align_images.py raw aligned

"""# Extract Latent Vector from Aligned Face"""

!python project_images.py --num-steps 1000 aligned generated

"""# Get Result from Blended Model"""

import numpy as np
from PIL import Image
import dnnlib
import dnnlib.tflib as tflib
from pathlib import Path

# Feed each projected latent through the blended (toon) generator and save
# the result next to the latent file as "<stem>-toon.jpg".
latent_dir = Path("generated")
latents = latent_dir.glob("*.npy")
for latent_file in latents:
    latent = np.load(latent_file)
    latent = np.expand_dims(latent,axis=0)
    synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False), minibatch_size=8)
    images = Gs_blended.components.synthesis.run(latent, randomize_noise=False, **synthesis_kwargs)
    Image.fromarray(images.transpose((0,2,3,1))[0], 'RGB').save(latent_file.parent / (f"{latent_file.stem}-toon.jpg"))

from PIL import Image
import matplotlib.pyplot as plt

# Show each projected image side by side with its toonified version.
embedded = Image.open("generated/example_01.png")
tooned = Image.open("generated/example_01-toon.jpg")
plt.figure(figsize=(16, 10))
plt.imshow(np.concatenate([np.array(embedded), np.array(tooned)], axis=1))

embedded = Image.open("generated/iu_01_01.png")
tooned = Image.open("generated/iu_01_01-toon.jpg")
plt.figure(figsize=(16, 10))
plt.imshow(np.concatenate([np.array(embedded), np.array(tooned)], axis=1))

embedded = Image.open("generated/iu_02_01.png")
tooned = Image.open("generated/iu_02_01-toon.jpg")
plt.figure(figsize=(16, 10))
plt.imshow(np.concatenate([np.array(embedded), np.array(tooned)], axis=1))
| 36.594059
| 313
| 0.763528
|
acfda46d6b1f9588a0d3c11c731197b2c9ec92b4
| 1,033
|
py
|
Python
|
collector/img_converter.py
|
Dalci/mini-line-tracer
|
d4cb159f9e097212cd65b3aa00ff3806d39df518
|
[
"MIT"
] | null | null | null |
collector/img_converter.py
|
Dalci/mini-line-tracer
|
d4cb159f9e097212cd65b3aa00ff3806d39df518
|
[
"MIT"
] | null | null | null |
collector/img_converter.py
|
Dalci/mini-line-tracer
|
d4cb159f9e097212cd65b3aa00ff3806d39df518
|
[
"MIT"
] | null | null | null |
import numpy as np
import pickle
import cv2
import os
def convert_img(img):
    """Binarize a grayscale lane image and shrink it to 16x16.

    Pixels darker than half the image's mean intensity become 255 and the
    rest 0 (THRESH_BINARY_INV), so dark lane markings end up as foreground.

    :param img: 2-D grayscale image array.
    :return: 16x16 uint8 binary image.
    """
    # Adaptive cutoff: half of this frame's mean intensity.
    threshold = int(np.mean(img)) * 0.5
    # Removed the leftover debug print of the threshold return value.
    _, binary = cv2.threshold(img.astype(np.uint8), threshold, 255, cv2.THRESH_BINARY_INV)
    # INTER_AREA is the recommended interpolation for downscaling.
    return cv2.resize(binary, (16, 16), interpolation=cv2.INTER_AREA)
if __name__ == '__main__':
    # Locate the bundled sample images relative to this script.
    current = os.path.dirname(__file__)
    print(current)
    sample_dir = os.path.join(current, 'sample lane')
    cvt_list = []
    for img_file in os.listdir(sample_dir):
        img = cv2.imread(os.path.join(sample_dir, img_file), cv2.IMREAD_GRAYSCALE)
        cvt_list.append(np.array(convert_img(img)))
        print(f'{img_file} converted.')
    # NOTE(review): results are collected but never persisted — the pickle
    # dump that would save them was left commented out in the original.
| 25.825
| 94
| 0.656341
|
acfda4b5a4856f3d39ae0d257c0a1ad9995e178f
| 85
|
py
|
Python
|
rllab/train/base.py
|
TomorrowIsAnOtherDay/RLlab
|
9749a8e4f8eb0c6c0ade19622cee3bd8a82c76a9
|
[
"Apache-2.0"
] | null | null | null |
rllab/train/base.py
|
TomorrowIsAnOtherDay/RLlab
|
9749a8e4f8eb0c6c0ade19622cee3bd8a82c76a9
|
[
"Apache-2.0"
] | null | null | null |
rllab/train/base.py
|
TomorrowIsAnOtherDay/RLlab
|
9749a8e4f8eb0c6c0ade19622cee3bd8a82c76a9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf8
# File: base.py
import functools32 as functools
| 14.166667
| 31
| 0.741176
|
acfda521dd431fdbb1ba7cc642e3efe32f88a49c
| 972
|
py
|
Python
|
sigcom/ingest/mongo/mongo_entities_meta_jsonld.py
|
dcic/signature-commons-controller
|
b69c4063235d927da27891e8a30d2822c6768a66
|
[
"Apache-2.0"
] | null | null | null |
sigcom/ingest/mongo/mongo_entities_meta_jsonld.py
|
dcic/signature-commons-controller
|
b69c4063235d927da27891e8a30d2822c6768a66
|
[
"Apache-2.0"
] | 2
|
2020-06-09T14:52:34.000Z
|
2020-11-06T18:02:49.000Z
|
sigcom/ingest/mongo/mongo_entities_meta_jsonld.py
|
dcic/signature-commons-controller
|
b69c4063235d927da27891e8a30d2822c6768a66
|
[
"Apache-2.0"
] | null | null | null |
import os
import pymongo
import json
from sigcom.util import first, mongo_bulk_upsert
inputs = (
'*.entities.jsonld',
)
def requirements(uri=(), **kwargs):
    """Return True if any of the given parsed URIs targets MongoDB.

    Splits the scheme on '+' so compound schemes such as ``mongodb+srv``
    are recognized — matching the scheme handling used by :func:`ingest`,
    which previously accepted URIs this check rejected.

    :param uri: iterable of parsed URI objects exposing ``.scheme``.
    :return: bool.
    """
    # Default changed from a mutable [] to an immutable () — it is only
    # iterated, so behavior is unchanged.
    return any('mongodb' in u.scheme.split('+') for u in uri)
def ingest(input_files, uri=[], limit=1000, **kawrgs):
    """Upsert entity metadata from a ``*.entities.jsonld`` file into Mongo.

    :param input_files: single-element iterable holding the jsonld path.
    :param uri: parsed URI objects; the first whose scheme contains a
        'mongodb' component (supports compound schemes like mongodb+srv)
        is used as the connection target.
    :param limit: bulk-upsert batch size.
    """
    input_file, = input_files
    # Get mongo uri
    mongo_uri = first(u for u in uri if 'mongodb' in u.scheme.split('+'))
    # Get extract mongo db name (strip the leading '/')
    db_path = mongo_uri.path[1:]
    # NOTE(review): mongo_uri appears to be a furl-like mutable URI object —
    # deleting .path leaves the bare server address; confirm against callers.
    del mongo_uri.path
    # Instantiate mongo client
    mongo = pymongo.MongoClient(str(mongo_uri))
    # Get mongo db
    db = getattr(mongo, db_path)

    def generate_entities():
        # Yield (filter, update) pairs for mongo_bulk_upsert one JSON line
        # at a time, so large files stream without being fully loaded.
        with open(input_file, 'r') as fr:
            for entity in map(json.loads, fr):
                yield {
                    '_id': entity['@id'],
                }, {
                    '$set': {
                        # 'dataset': entity['dataset'],
                        'meta': entity['meta'],
                    },
                }

    mongo_bulk_upsert(
        db.entities,
        generate_entities(),
        limit=limit,
    )
| 23.142857
| 71
| 0.605967
|
acfda532e4b65c25bda34647c4f8723cab542ece
| 1,072
|
py
|
Python
|
tests/test_system.py
|
srobo-legacy/comp-poltergeist
|
15a3605257246455152ea1d4d0281329269c06f2
|
[
"MIT"
] | null | null | null |
tests/test_system.py
|
srobo-legacy/comp-poltergeist
|
15a3605257246455152ea1d4d0281329269c06f2
|
[
"MIT"
] | null | null | null |
tests/test_system.py
|
srobo-legacy/comp-poltergeist
|
15a3605257246455152ea1d4d0281329269c06f2
|
[
"MIT"
] | null | null | null |
import os.path
import subprocess
import yaml
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def setup_module():
    """Spin up the redis instance required by the test suite."""
    script = os.path.join(ROOT, 'start-redis')
    subprocess.call([script])
def teardown_module():
    """Shut the redis instance back down after the suite."""
    script = os.path.join(ROOT, 'stop-redis')
    subprocess.call([script])
def check_output(*args):
    """Run the project's ``command`` helper with *args* and return its output.

    The output is asserted non-None and echoed (stripped) so failing tests
    show what the command actually produced.
    """
    commander = os.path.join(ROOT, 'command')
    output = subprocess.check_output([commander] + list(args))
    assert output is not None
    # Parenthesized print works under both Python 2 and Python 3.
    print(output.strip())
    return output
def test_version():
    """The version string should be non-trivial but bounded in length."""
    version = check_output('version')
    assert 4 < len(version) <= 40
def test_teams():
    """The YAML team listing must at least contain a 'list' key."""
    teams_info_yaml = check_output('list-teams', '--yaml')
    # safe_load: the command output is plain data; yaml.load without a
    # Loader can construct arbitrary Python objects and is deprecated.
    teams_info = yaml.safe_load(teams_info_yaml)
    assert 'list' in teams_info
    # Can't assert about the data
def test_delay():
    """A delay set via set-delay should round-trip through get-delay."""
    dur = 42
    check_output('set-delay', str(dur))
    # safe_load: parse the command output as plain data (see test_teams).
    delay = yaml.safe_load(check_output('get-delay'))
    assert delay['units'] == 'seconds'
    assert delay['delay'] == dur
| 25.52381
| 66
| 0.669776
|
acfda5f28bc6abdd9d1474c9a7a27a87cdac0cf8
| 2,509
|
py
|
Python
|
documentation_tools/build_documentation.py
|
uliwitness/HexFiend
|
b7fc8642f9076bbfe1e2dc0cb23a38fca982fc45
|
[
"BSD-2-Clause"
] | null | null | null |
documentation_tools/build_documentation.py
|
uliwitness/HexFiend
|
b7fc8642f9076bbfe1e2dc0cb23a38fca982fc45
|
[
"BSD-2-Clause"
] | null | null | null |
documentation_tools/build_documentation.py
|
uliwitness/HexFiend
|
b7fc8642f9076bbfe1e2dc0cb23a38fca982fc45
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import sys, os, subprocess, distutils.spawn, shutil
env = os.getenv
norm = os.path.normpath
built_products_dir = env("BUILT_PRODUCTS_DIR")
if built_products_dir == None:
print "Environmental variable BUILT_PRODUCTS_DIR is missing. This script should be run from within Xcode."
sys.exit(1)
src_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
config_path= norm(src_root + "/documentation_tools/hexfiend_doxyfile.config")
if not os.path.isfile(config_path):
print "Doxygen config file does not exist at " + config_path
sys.exit(1)
# Silently take advantage of MacPorts or Homebrew.
doxygen_search = os.pathsep.join((os.environ['PATH'],'/usr/local/bin','/opt/local/bin'))
doxygen_path = os.getenv("DOXYGEN_PATH") or distutils.spawn.find_executable("doxygen", path=doxygen_search)
if not doxygen_path or not os.path.isfile(doxygen_path):
if os.getenv("DOXYGEN_PATH"):
print "Could not find doxygen at DOXYGEN_PATH=", doxygen_path
else:
print "Could not find doxygen: install doxygen to your PATH, or add a DOXYGEN_PATH"
sys.exit(1)
# Headers should be a symlink, so get its real path
headers = os.path.realpath(built_products_dir + "/HexFiend.framework/Headers")
if not os.path.isdir(headers):
print "The HexFiend header directory does not exist at " + headers
sys.exit(1)
output_dir = norm(os.path.join(src_root, 'docs'))
try:
os.mkdir(output_dir)
except:
pass
if not os.path.isdir(output_dir):
print "The documentation output directory does not exist at " + output_dir
sys.exit(1)
print 'Documentation output: ' + output_dir
sys.stdout.flush()
new_wd = norm(src_root + "/documentation_tools/")
final_output_dir = norm(os.path.join(output_dir, 'docs'))
temp_output_dir = norm(os.path.join(output_dir, 'html'))
shutil.rmtree(final_output_dir)
proc = subprocess.Popen([doxygen_path, '-'], shell=False, cwd=new_wd, stdin=subprocess.PIPE)
conf_file = open(config_path, 'r')
for line in conf_file:
if line.startswith('INPUT '):
line = 'INPUT = ' + headers
elif line.startswith('OUTPUT_DIRECTORY '):
line = 'OUTPUT_DIRECTORY = ' + output_dir
# Strip the header path as it probably contains the user name,
# which we don't want outputted in the html
elif line.startswith('STRIP_FROM_PATH '):
line = 'STRIP_FROM_PATH = ' + headers
proc.stdin.write(line)
proc.stdin.close()
proc.wait()
# Move the 'html' directory to 'docs'
os.rename(temp_output_dir, final_output_dir)
sys.exit(0)
| 33.905405
| 110
| 0.736548
|
acfda6393b7733d34a8d2011bda3f270a5998bb3
| 377
|
py
|
Python
|
timepiece/manager/migrations/0002_profile_payroll.py
|
mgeorge8/django_time
|
f75a442941b0ebbb6cc46a6d18e42b91695b7e57
|
[
"MIT"
] | 1
|
2018-11-09T02:09:14.000Z
|
2018-11-09T02:09:14.000Z
|
timepiece/manager/migrations/0002_profile_payroll.py
|
mgeorge8/django_time
|
f75a442941b0ebbb6cc46a6d18e42b91695b7e57
|
[
"MIT"
] | null | null | null |
timepiece/manager/migrations/0002_profile_payroll.py
|
mgeorge8/django_time
|
f75a442941b0ebbb6cc46a6d18e42b91695b7e57
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.2 on 2019-01-17 12:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Generated schema migration: adds a boolean 'payroll' flag
    # (default True) to the manager app's Profile model.
    dependencies = [
        ('manager', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='payroll',
            field=models.BooleanField(default=True),
        ),
    ]
| 19.842105
| 52
| 0.588859
|
acfda647565df219e5179b9468ee304ba893cb87
| 2,056
|
py
|
Python
|
ocr/save_nparrays.py
|
Mohamed209/Train-CRNN
|
6d2fe08d3bc43494158c732dc5997a951aef18c9
|
[
"MIT"
] | 1
|
2020-06-29T19:39:15.000Z
|
2020-06-29T19:39:15.000Z
|
ocr/save_nparrays.py
|
Mohamed209/Train-CRNN
|
6d2fe08d3bc43494158c732dc5997a951aef18c9
|
[
"MIT"
] | 6
|
2020-09-25T22:41:28.000Z
|
2022-02-09T23:38:46.000Z
|
ocr/save_nparrays.py
|
Mohamed209/Train-CRNN
|
6d2fe08d3bc43494158c732dc5997a951aef18c9
|
[
"MIT"
] | null | null | null |
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import h5py
import cv2
import os
import string
import pyarabic.araby as araby
import sys
# utils
# Character set recognised by the model: Arabic-Indic digits (indices 0-9)
# followed by ASCII digits (indices 10-19); label i maps to letters[i].
letters = u'٠١٢٣٤٥٦٧٨٩'+'0123456789'
def labels_to_text(labels):
    """Map a sequence of numeric class labels back to their characters."""
    # Generator expression feeds join directly; no intermediate list needed.
    return ''.join(letters[int(label)] for label in labels)
def text_to_labels(text):
    """Map each character of *text* to its index in ``letters``."""
    return [letters.index(ch) for ch in text]
# data loader
# Target image geometry fed to the CRNN: fixed height/width, grayscale.
img_h = 32
img_w = 432
# data loader script expects data to be found in folder as pairs of images , txt files contain labels
DATA_PATH = '../dataset/generated_data/'
data = sorted(os.listdir(DATA_PATH))
# Half of the directory entries are images, the other half label files.
# NOTE(review): assumes every .png has exactly one matching label file so
# counters i and j stay aligned via the sorted order — TODO confirm.
images = np.zeros(shape=(len(data)//2, img_h, img_w, 1))
label_length = np.zeros((len(data)//2, 1), dtype=np.int64)
text = []
i = 0
j = 0
for sample in data:
    print("loaded >>>", sample)
    if sample.split('.')[-1] == 'png':
        # Load grayscale, resize to the fixed geometry, scale to [0, 1],
        # and add the trailing channel dimension.
        img = cv2.imread(DATA_PATH+sample, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (img_w, img_h))
        img = img.astype(np.float32)
        img = (img / 255.0)
        img = np.expand_dims(img, axis=-1)
        images[i] = img
        i += 1
    else:
        with open(DATA_PATH+sample, 'r',encoding='utf-8') as s:
            sent = s.readlines()
            text.append(sent)
            # NOTE(review): len(sent[0]) may include a trailing newline
            # depending on how the label files were written — verify.
            label_length[j] = len(sent[0])
            j += 1
# we need number repr for text
gt_text = []
textnum = []
for line in text:
    data = line[0].strip()
    textnum.append(text_to_labels(data))
for i in range(len(textnum)):
    gt_text.append(textnum[i])
# Pad/truncate every label sequence to length 8 for fixed-size batches.
gt_padded_txt = pad_sequences(
    gt_text, maxlen=8, padding='post', truncating='post', value=0)
print("images >>", images.shape)
print("text >>", gt_padded_txt.shape)
print("label length>>", label_length.shape)
# save np arrays to hard disk so as not to generate them from scratch in the begining of each training session
h5 = h5py.File('../dataset/dataset.h5', 'w')
h5.create_dataset('images', data=images)
h5.create_dataset('text', data=gt_padded_txt)
h5.create_dataset('label_length', data=label_length)
h5.close()
print("np arrays saved to hard disk :)")
| 28.555556
| 110
| 0.668774
|
acfda64a97d758bf0ee7a8fc45284720ce50b6fd
| 611
|
py
|
Python
|
src/niweb/apps/nerds/migrations/0001_initial.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2018-12-21T09:35:27.000Z
|
2019-07-31T18:51:58.000Z
|
src/niweb/apps/nerds/migrations/0001_initial.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 6
|
2019-07-25T07:10:23.000Z
|
2021-02-08T09:58:57.000Z
|
src/niweb/apps/nerds/migrations/0001_initial.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 5
|
2019-02-06T12:00:26.000Z
|
2021-11-19T14:48:06.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration for this app: creates the HostUserMap table mapping
    # a unique domain to a host user name.
    dependencies = [
        ('noclook', '0002_nodetype_hidden'),
    ]
    operations = [
        migrations.CreateModel(
            name='HostUserMap',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('domain', models.CharField(max_length=255, unique=True)),
                ('host_user', models.CharField(max_length=255)),
            ],
        ),
    ]
| 27.772727
| 114
| 0.590835
|
acfda65ed402b428c90c45c3710d73821c7014fe
| 814
|
py
|
Python
|
students/k3340/practical_works/Bakirova_Shoola/PW1-3/django_project/urls.py
|
BakirovaS/ITMO_ICT_WebProgramming_2020
|
2de0ec900e729a22112fd8827fb685ce90ae8a9b
|
[
"MIT"
] | null | null | null |
students/k3340/practical_works/Bakirova_Shoola/PW1-3/django_project/urls.py
|
BakirovaS/ITMO_ICT_WebProgramming_2020
|
2de0ec900e729a22112fd8827fb685ce90ae8a9b
|
[
"MIT"
] | null | null | null |
students/k3340/practical_works/Bakirova_Shoola/PW1-3/django_project/urls.py
|
BakirovaS/ITMO_ICT_WebProgramming_2020
|
2de0ec900e729a22112fd8827fb685ce90ae8a9b
|
[
"MIT"
] | null | null | null |
"""django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route the Django admin at /admin/ and delegate everything else to the
# project_first_app URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('project_first_app.urls')),
]
| 35.391304
| 77
| 0.708845
|
acfda6f8c059e3546f97f546bfc26f2021a372db
| 40,522
|
py
|
Python
|
sklearn/ensemble/_bagging.py
|
lacouth/scikit-learn
|
d44c7ae8eb164ebf563f16301e67c519c54cb119
|
[
"BSD-3-Clause"
] | null | null | null |
sklearn/ensemble/_bagging.py
|
lacouth/scikit-learn
|
d44c7ae8eb164ebf563f16301e67c519c54cb119
|
[
"BSD-3-Clause"
] | null | null | null |
sklearn/ensemble/_bagging.py
|
lacouth/scikit-learn
|
d44c7ae8eb164ebf563f16301e67c519c54cb119
|
[
"BSD-3-Clause"
] | null | null | null |
"""Bagging meta-estimator."""
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import itertools
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from joblib import Parallel
from ._base import BaseEnsemble, _partition_estimators
from ..base import ClassifierMixin, RegressorMixin
from ..metrics import r2_score, accuracy_score
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..utils import check_random_state, column_or_1d, deprecated
from ..utils import indices_to_mask
from ..utils.metaestimators import if_delegate_has_method
from ..utils.multiclass import check_classification_targets
from ..utils.random import sample_without_replacement
from ..utils.validation import has_fit_parameter, check_is_fitted, _check_sample_weight
from ..utils.fixes import delayed
# Public API of this module.
__all__ = ["BaggingClassifier", "BaggingRegressor"]
# Largest value usable as a seed for numpy's legacy RandomState.
MAX_INT = np.iinfo(np.int32).max
def _generate_indices(random_state, bootstrap, n_population, n_samples):
"""Draw randomly sampled indices."""
# Draw sample indices
if bootstrap:
indices = random_state.randint(0, n_population, n_samples)
else:
indices = sample_without_replacement(
n_population, n_samples, random_state=random_state
)
return indices
def _generate_bagging_indices(
    random_state,
    bootstrap_features,
    bootstrap_samples,
    n_features,
    n_samples,
    max_features,
    max_samples,
):
    """Randomly draw feature and sample indices."""
    # Normalise seed/generator into a RandomState so both draws below share
    # one stream.
    rng = check_random_state(random_state)
    # Features are drawn before samples; any caller replaying this stream
    # must preserve that order.
    feature_indices = _generate_indices(
        rng, bootstrap_features, n_features, max_features
    )
    sample_indices = _generate_indices(
        rng, bootstrap_samples, n_samples, max_samples
    )
    return feature_indices, sample_indices
def _parallel_build_estimators(
    n_estimators, ensemble, X, y, sample_weight, seeds, total_n_estimators, verbose
):
    """Private function used to build a batch of estimators within a job."""
    # Retrieve settings
    n_samples, n_features = X.shape
    max_features = ensemble._max_features
    max_samples = ensemble._max_samples
    bootstrap = ensemble.bootstrap
    bootstrap_features = ensemble.bootstrap_features
    support_sample_weight = has_fit_parameter(ensemble.base_estimator_, "sample_weight")
    if not support_sample_weight and sample_weight is not None:
        raise ValueError("The base estimator doesn't support sample weight")
    # Build estimators
    estimators = []
    estimators_features = []
    for i in range(n_estimators):
        if verbose > 1:
            print(
                "Building estimator %d of %d for this parallel run "
                "(total %d)..." % (i + 1, n_estimators, total_n_estimators)
            )
        # One seed per estimator: it drives both the estimator itself and
        # the feature/sample index draw below.
        random_state = seeds[i]
        estimator = ensemble._make_estimator(append=False, random_state=random_state)
        # Draw random feature, sample indices
        features, indices = _generate_bagging_indices(
            random_state,
            bootstrap_features,
            bootstrap,
            n_features,
            n_samples,
            max_features,
            max_samples,
        )
        # Draw samples, using sample weights, and then fit
        if support_sample_weight:
            if sample_weight is None:
                curr_sample_weight = np.ones((n_samples,))
            else:
                curr_sample_weight = sample_weight.copy()
            if bootstrap:
                # Bootstrap emulated via weights: each sample's weight is
                # multiplied by how many times it was drawn.
                sample_counts = np.bincount(indices, minlength=n_samples)
                curr_sample_weight *= sample_counts
            else:
                # Without replacement: zero the weight of undrawn samples.
                not_indices_mask = ~indices_to_mask(indices, n_samples)
                curr_sample_weight[not_indices_mask] = 0
            estimator.fit(X[:, features], y, sample_weight=curr_sample_weight)
        else:
            # No sample_weight support: materialise the drawn rows instead.
            estimator.fit((X[indices])[:, features], y[indices])
        estimators.append(estimator)
        estimators_features.append(features)
    return estimators, estimators_features
def _parallel_predict_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute (proba-)predictions within a job."""
n_samples = X.shape[0]
proba = np.zeros((n_samples, n_classes))
for estimator, features in zip(estimators, estimators_features):
if hasattr(estimator, "predict_proba"):
proba_estimator = estimator.predict_proba(X[:, features])
if n_classes == len(estimator.classes_):
proba += proba_estimator
else:
proba[:, estimator.classes_] += proba_estimator[
:, range(len(estimator.classes_))
]
else:
# Resort to voting
predictions = estimator.predict(X[:, features])
for i in range(n_samples):
proba[i, predictions[i]] += 1
return proba
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
    """Private function used to compute log probabilities within a job."""
    n_samples = X.shape[0]
    # Start at log(0) == -inf so logaddexp accumulates a running log-sum.
    log_proba = np.empty((n_samples, n_classes))
    log_proba.fill(-np.inf)
    all_classes = np.arange(n_classes, dtype=int)
    for estimator, features in zip(estimators, estimators_features):
        log_proba_estimator = estimator.predict_log_proba(X[:, features])
        if n_classes == len(estimator.classes_):
            log_proba = np.logaddexp(log_proba, log_proba_estimator)
        else:
            # Estimator trained on a class subset: fold its columns into
            # the matching global columns only.
            log_proba[:, estimator.classes_] = np.logaddexp(
                log_proba[:, estimator.classes_],
                log_proba_estimator[:, range(len(estimator.classes_))],
            )
            # Unseen classes contribute log(0); logaddexp with -inf leaves
            # the accumulated values unchanged.
            missing = np.setdiff1d(all_classes, estimator.classes_)
            log_proba[:, missing] = np.logaddexp(log_proba[:, missing], -np.inf)
    return log_proba
def _parallel_decision_function(estimators, estimators_features, X):
"""Private function used to compute decisions within a job."""
return sum(
estimator.decision_function(X[:, features])
for estimator, features in zip(estimators, estimators_features)
)
def _parallel_predict_regression(estimators, estimators_features, X):
"""Private function used to compute predictions within a job."""
return sum(
estimator.predict(X[:, features])
for estimator, features in zip(estimators, estimators_features)
)
class BaseBagging(BaseEnsemble, metaclass=ABCMeta):
    """Base class for Bagging meta-estimator.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(
        self,
        base_estimator=None,
        n_estimators=10,
        *,
        max_samples=1.0,
        max_features=1.0,
        bootstrap=True,
        bootstrap_features=False,
        oob_score=False,
        warm_start=False,
        n_jobs=None,
        random_state=None,
        verbose=0
    ):
        super().__init__(base_estimator=base_estimator, n_estimators=n_estimators)
        # Parameters are stored untouched (scikit-learn convention);
        # validation happens in _fit.
        self.max_samples = max_samples
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.bootstrap_features = bootstrap_features
        self.oob_score = oob_score
        self.warm_start = warm_start
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
    def fit(self, X, y, sample_weight=None):
        """Build a Bagging ensemble of estimators from the training
        set (X, y).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.
        y : array-like of shape (n_samples,)
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Note that this is supported only if the base estimator supports
            sample weighting.
        Returns
        -------
        self : object
        """
        return self._fit(X, y, self.max_samples, sample_weight=sample_weight)
    def _parallel_args(self):
        # Extra keyword arguments for joblib.Parallel; subclasses may override.
        return {}
    def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
        """Build a Bagging ensemble of estimators from the training
        set (X, y).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.
        y : array-like of shape (n_samples,)
            The target values (class labels in classification, real numbers in
            regression).
        max_samples : int or float, default=None
            Argument to use instead of self.max_samples.
        max_depth : int, default=None
            Override value used when constructing base estimator. Only
            supported if the base estimator has a max_depth parameter.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Note that this is supported only if the base estimator supports
            sample weighting.
        Returns
        -------
        self : object
        """
        random_state = check_random_state(self.random_state)
        # Convert data (X is required to be 2d and indexable)
        X, y = self._validate_data(
            X,
            y,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
            multi_output=True,
        )
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X, dtype=None)
        # Remap output
        n_samples = X.shape[0]
        self._n_samples = n_samples
        y = self._validate_y(y)
        # Check parameters
        self._validate_estimator()
        if max_depth is not None:
            self.base_estimator_.max_depth = max_depth
        # Validate max_samples
        if max_samples is None:
            max_samples = self.max_samples
        elif not isinstance(max_samples, numbers.Integral):
            # Fractional max_samples is interpreted relative to n_samples.
            max_samples = int(max_samples * X.shape[0])
        if not (0 < max_samples <= X.shape[0]):
            raise ValueError("max_samples must be in (0, n_samples]")
        # Store validated integer row sampling value
        self._max_samples = max_samples
        # Validate max_features
        if isinstance(self.max_features, numbers.Integral):
            max_features = self.max_features
        elif isinstance(self.max_features, float):
            max_features = self.max_features * self.n_features_in_
        else:
            raise ValueError("max_features must be int or float")
        if not (0 < max_features <= self.n_features_in_):
            raise ValueError("max_features must be in (0, n_features]")
        max_features = max(1, int(max_features))
        # Store validated integer feature sampling value
        self._max_features = max_features
        # Other checks
        if not self.bootstrap and self.oob_score:
            raise ValueError(
                "Out of bag estimation only available" " if bootstrap=True"
            )
        if self.warm_start and self.oob_score:
            raise ValueError(
                "Out of bag estimate only available" " if warm_start=False"
            )
        # Drop any stale OOB score left over from a previous fit.
        if hasattr(self, "oob_score_") and self.warm_start:
            del self.oob_score_
        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []
            self.estimators_features_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError(
                "n_estimators=%d must be larger or equal to "
                "len(estimators_)=%d when warm_start==True"
                % (self.n_estimators, len(self.estimators_))
            )
        elif n_more_estimators == 0:
            warn(
                "Warm-start fitting without increasing n_estimators does not "
                "fit new trees."
            )
            return self
        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            n_more_estimators, self.n_jobs
        )
        total_n_estimators = sum(n_estimators)
        # Advance random state to state after training
        # the first n_estimators
        if self.warm_start and len(self.estimators_) > 0:
            random_state.randint(MAX_INT, size=len(self.estimators_))
        seeds = random_state.randint(MAX_INT, size=n_more_estimators)
        # Seeds are kept so estimators_samples_ can replay the draws later.
        self._seeds = seeds
        all_results = Parallel(
            n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
        )(
            delayed(_parallel_build_estimators)(
                n_estimators[i],
                self,
                X,
                y,
                sample_weight,
                seeds[starts[i] : starts[i + 1]],
                total_n_estimators,
                verbose=self.verbose,
            )
            for i in range(n_jobs)
        )
        # Reduce
        self.estimators_ += list(
            itertools.chain.from_iterable(t[0] for t in all_results)
        )
        self.estimators_features_ += list(
            itertools.chain.from_iterable(t[1] for t in all_results)
        )
        if self.oob_score:
            self._set_oob_score(X, y)
        return self
    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""
    def _validate_y(self, y):
        # Default: accept 1d targets as a column vector; subclasses override
        # with task-specific validation.
        if len(y.shape) == 1 or y.shape[1] == 1:
            return column_or_1d(y, warn=True)
        else:
            return y
    def _get_estimators_indices(self):
        # Get drawn indices along both sample and feature axes
        for seed in self._seeds:
            # Operations accessing random_state must be performed identically
            # to those in `_parallel_build_estimators()`
            feature_indices, sample_indices = _generate_bagging_indices(
                seed,
                self.bootstrap_features,
                self.bootstrap,
                self.n_features_in_,
                self._n_samples,
                self._max_features,
                self._max_samples,
            )
            yield feature_indices, sample_indices
    @property
    def estimators_samples_(self):
        """
        The subset of drawn samples for each base estimator.
        Returns a dynamically generated list of indices identifying
        the samples used for fitting each member of the ensemble, i.e.,
        the in-bag samples.
        Note: the list is re-created at each call to the property in order
        to reduce the object memory footprint by not storing the sampling
        data. Thus fetching the property may be slower than expected.
        """
        return [sample_indices for _, sample_indices in self._get_estimators_indices()]
    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "Attribute n_features_ was deprecated in version 1.0 and will be "
        "removed in 1.2. Use 'n_features_in_' instead."
    )
    @property
    def n_features_(self):
        return self.n_features_in_
class BaggingClassifier(ClassifierMixin, BaseBagging):
"""A Bagging classifier.
A Bagging classifier is an ensemble meta-estimator that fits base
classifiers each on random subsets of the original dataset and then
aggregate their individual predictions (either by voting or by averaging)
to form a final prediction. Such a meta-estimator can typically be used as
a way to reduce the variance of a black-box estimator (e.g., a decision
tree), by introducing randomization into its construction procedure and
then making an ensemble out of it.
This algorithm encompasses several works from the literature. When random
subsets of the dataset are drawn as random subsets of the samples, then
this algorithm is known as Pasting [1]_. If samples are drawn with
replacement, then the method is known as Bagging [2]_. When random subsets
of the dataset are drawn as random subsets of the features, then the method
is known as Random Subspaces [3]_. Finally, when base estimators are built
on subsets of both samples and features, then the method is known as
Random Patches [4]_.
Read more in the :ref:`User Guide <bagging>`.
.. versionadded:: 0.15
Parameters
----------
base_estimator : object, default=None
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a
:class:`~sklearn.tree.DecisionTreeClassifier`.
n_estimators : int, default=10
The number of base estimators in the ensemble.
max_samples : int or float, default=1.0
The number of samples to draw from X to train each base estimator (with
replacement by default, see `bootstrap` for more details).
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
max_features : int or float, default=1.0
The number of features to draw from X to train each base estimator (
without replacement by default, see `bootstrap_features` for more
details).
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : bool, default=True
Whether samples are drawn with replacement. If False, sampling
without replacement is performed.
bootstrap_features : bool, default=False
Whether features are drawn with replacement.
oob_score : bool, default=False
Whether to use out-of-bag samples to estimate
the generalization error. Only available if bootstrap=True.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.17
*warm_start* constructor parameter.
n_jobs : int, default=None
The number of jobs to run in parallel for both :meth:`fit` and
:meth:`predict`. ``None`` means 1 unless in a
:obj:`joblib.parallel_backend` context. ``-1`` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls the random resampling of the original dataset
(sample wise and feature wise).
If the base estimator accepts a `random_state` attribute, a different
seed is generated for each instance in the ensemble.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
n_features_ : int
The number of features when :meth:`fit` is performed.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
estimators_ : list of estimators
The collection of fitted base estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by an array of the indices selected.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int or list
The number of classes.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute exists only when ``oob_score`` is True.
oob_decision_function_ : ndarray of shape (n_samples, n_classes)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN. This attribute exists
only when ``oob_score`` is True.
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.ensemble import BaggingClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = BaggingClassifier(base_estimator=SVC(),
... n_estimators=10, random_state=0).fit(X, y)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
"""
    def __init__(
        self,
        base_estimator=None,
        n_estimators=10,
        *,
        max_samples=1.0,
        max_features=1.0,
        bootstrap=True,
        bootstrap_features=False,
        oob_score=False,
        warm_start=False,
        n_jobs=None,
        random_state=None,
        verbose=0
    ):
        # Pure pass-through to the BaseBagging constructor; no extra state
        # is added here and parameters are validated later during fit.
        super().__init__(
            base_estimator,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            bootstrap=bootstrap,
            bootstrap_features=bootstrap_features,
            oob_score=oob_score,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
        )
    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        # Fall back to a decision tree when no base estimator was supplied.
        super()._validate_estimator(default=DecisionTreeClassifier())
    def _set_oob_score(self, X, y):
        """Compute the out-of-bag decision function and accuracy on (X, y)."""
        n_samples = y.shape[0]
        n_classes_ = self.n_classes_
        predictions = np.zeros((n_samples, n_classes_))
        for estimator, samples, features in zip(
            self.estimators_, self.estimators_samples_, self.estimators_features_
        ):
            # Create mask for OOB samples
            mask = ~indices_to_mask(samples, n_samples)
            if hasattr(estimator, "predict_proba"):
                predictions[mask, :] += estimator.predict_proba(
                    (X[mask, :])[:, features]
                )
            else:
                # Hard-voting fallback: p holds predictions for the masked
                # rows only, so j tracks the position within p.
                p = estimator.predict((X[mask, :])[:, features])
                j = 0
                for i in range(n_samples):
                    if mask[i]:
                        predictions[i, p[j]] += 1
                        j += 1
        if (predictions.sum(axis=1) == 0).any():
            warn(
                "Some inputs do not have OOB scores. "
                "This probably means too few estimators were used "
                "to compute any reliable oob estimates."
            )
        # Row-normalise accumulated votes/probabilities; accuracy is scored
        # against the per-row argmax of the raw accumulation.
        oob_decision_function = predictions / predictions.sum(axis=1)[:, np.newaxis]
        oob_score = accuracy_score(y, np.argmax(predictions, axis=1))
        self.oob_decision_function_ = oob_decision_function
        self.oob_score_ = oob_score
def _validate_y(self, y):
y = column_or_1d(y, warn=True)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is computed as the class with
the highest mean predicted probability. If base estimators do not
implement a ``predict_proba`` method, then it resorts to voting.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
"""
predicted_probabilitiy = self.predict_proba(X)
return self.classes_.take((np.argmax(predicted_probabilitiy, axis=1)), axis=0)
    def predict_proba(self, X):
        """Predict class probabilities for X.
        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the base estimators in the
        ensemble. If base estimators do not implement a ``predict_proba``
        method, then it resorts to voting and the predicted class probabilities
        of an input sample represents the proportion of estimators predicting
        each class.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.
        Returns
        -------
        p : ndarray of shape (n_samples, n_classes)
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        check_is_fitted(self)
        # Check data
        # reset=False: keep the feature bookkeeping learned at fit time.
        X = self._validate_data(
            X,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
            reset=False,
        )
        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            self.n_estimators, self.n_jobs
        )
        # Each job sums probabilities over its slice of estimators.
        all_proba = Parallel(
            n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
        )(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i] : starts[i + 1]],
                self.estimators_features_[starts[i] : starts[i + 1]],
                X,
                self.n_classes_,
            )
            for i in range(n_jobs)
        )
        # Reduce
        # Mean over the whole ensemble: sum of per-job sums / n_estimators.
        proba = sum(all_proba) / self.n_estimators
        return proba
def predict_log_proba(self, X):
    """Predict class log-probabilities for X.

    The log-probability of an input sample is the log of the mean
    predicted class probabilities of the base estimators in the ensemble.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The training input samples. Sparse matrices are accepted only if
        they are supported by the base estimator.

    Returns
    -------
    p : ndarray of shape (n_samples, n_classes)
        The class log-probabilities of the input samples. The order of the
        classes corresponds to that in the attribute :term:`classes_`.
    """
    check_is_fitted(self)

    # Guard clause: without a native predict_log_proba on the base
    # estimator, fall back to the log of the averaged probabilities.
    if not hasattr(self.base_estimator_, "predict_log_proba"):
        return np.log(self.predict_proba(X))

    # Validate X without resetting n_features_in_ (prediction time).
    X = self._validate_data(
        X,
        accept_sparse=["csr", "csc"],
        dtype=None,
        force_all_finite=False,
        reset=False,
    )

    # Partition the fitted estimators into contiguous chunks, one per job.
    n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)

    chunked_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
        delayed(_parallel_predict_log_proba)(
            self.estimators_[starts[job] : starts[job + 1]],
            self.estimators_features_[starts[job] : starts[job + 1]],
            X,
            self.n_classes_,
        )
        for job in range(n_jobs)
    )

    # Combine chunk results in log-space, then normalize by the
    # ensemble size (division becomes subtraction of a log).
    combined = chunked_log_proba[0]
    for chunk in chunked_log_proba[1:]:
        combined = np.logaddexp(combined, chunk)
    combined -= np.log(self.n_estimators)
    return combined
@if_delegate_has_method(delegate="base_estimator")
def decision_function(self, X):
    """Average of the decision functions of the base classifiers.

    Only available when the base estimator itself implements
    ``decision_function`` (enforced by the decorator).

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The training input samples. Sparse matrices are accepted only if
        they are supported by the base estimator.

    Returns
    -------
    score : ndarray of shape (n_samples, k)
        The decision function of the input samples. The columns correspond
        to the classes in sorted order, as they appear in the attribute
        ``classes_``. Regression and binary classification are special
        cases with ``k == 1``, otherwise ``k == n_classes``.
    """
    check_is_fitted(self)

    # Validate X without resetting n_features_in_ (prediction time).
    X = self._validate_data(
        X,
        accept_sparse=["csr", "csc"],
        dtype=None,
        force_all_finite=False,
        reset=False,
    )

    # Partition the fitted estimators into contiguous chunks, one per job.
    n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)

    partial_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
        delayed(_parallel_decision_function)(
            self.estimators_[starts[job] : starts[job + 1]],
            self.estimators_features_[starts[job] : starts[job + 1]],
            X,
        )
        for job in range(n_jobs)
    )

    # Each chunk returns summed decisions; average over the ensemble.
    return sum(partial_decisions) / self.n_estimators
class BaggingRegressor(RegressorMixin, BaseBagging):
    """A Bagging regressor.

    A Bagging regressor is an ensemble meta-estimator that fits base
    regressors each on random subsets of the original dataset and then
    aggregates their individual predictions (by averaging) to form a final
    prediction. Such a meta-estimator can typically be used as a way to
    reduce the variance of a black-box estimator (e.g., a decision tree)
    by introducing randomization into its construction procedure and then
    making an ensemble out of it.

    This algorithm encompasses several works from the literature. When
    random subsets of the dataset are drawn as random subsets of the
    samples, it is known as Pasting [1]_. If samples are drawn with
    replacement, it is known as Bagging [2]_. When random subsets of the
    dataset are drawn as random subsets of the features, it is known as
    Random Subspaces [3]_. Finally, when base estimators are built on
    subsets of both samples and features, it is known as Random
    Patches [4]_.

    Read more in the :ref:`User Guide <bagging>`.

    .. versionadded:: 0.15

    Parameters
    ----------
    base_estimator : object, default=None
        The base estimator to fit on random subsets of the dataset.
        If None, then the base estimator is a
        :class:`~sklearn.tree.DecisionTreeRegressor`.

    n_estimators : int, default=10
        The number of base estimators in the ensemble.

    max_samples : int or float, default=1.0
        The number of samples to draw from X to train each base estimator
        (with replacement by default, see `bootstrap` for more details).

        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.

    max_features : int or float, default=1.0
        The number of features to draw from X to train each base estimator
        (without replacement by default, see `bootstrap_features` for more
        details).

        - If int, then draw `max_features` features.
        - If float, then draw `max_features * X.shape[1]` features.

    bootstrap : bool, default=True
        Whether samples are drawn with replacement. If False, sampling
        without replacement is performed.

    bootstrap_features : bool, default=False
        Whether features are drawn with replacement.

    oob_score : bool, default=False
        Whether to use out-of-bag samples to estimate the generalization
        error. Only available if bootstrap=True.

    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a
        whole new ensemble. See :term:`the Glossary <warm_start>`.

    n_jobs : int, default=None
        The number of jobs to run in parallel for both :meth:`fit` and
        :meth:`predict`. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors. See :term:`Glossary <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls the random resampling of the original dataset
        (sample wise and feature wise). If the base estimator accepts a
        `random_state` attribute, a different seed is generated for each
        instance in the ensemble. Pass an int for reproducible output
        across multiple function calls. See :term:`Glossary <random_state>`.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    Attributes
    ----------
    base_estimator_ : estimator
        The base estimator from which the ensemble is grown.

    n_features_ : int
        The number of features when :meth:`fit` is performed.

        .. deprecated:: 1.0
            Attribute `n_features_` was deprecated in version 1.0 and will
            be removed in 1.2. Use `n_features_in_` instead.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    estimators_ : list of estimators
        The collection of fitted sub-estimators.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each
        base estimator. Each subset is defined by an array of the indices
        selected.

    estimators_features_ : list of arrays
        The subset of drawn features for each base estimator.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag
        estimate. This attribute exists only when ``oob_score`` is True.

    oob_prediction_ : ndarray of shape (n_samples,)
        Prediction computed with out-of-bag estimate on the training set.
        If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_prediction_` might contain NaN. This attribute exists only
        when ``oob_score`` is True.

    Examples
    --------
    >>> from sklearn.svm import SVR
    >>> from sklearn.ensemble import BaggingRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=100, n_features=4,
    ...                        n_informative=2, n_targets=1,
    ...                        random_state=0, shuffle=False)
    >>> regr = BaggingRegressor(base_estimator=SVR(),
    ...                         n_estimators=10, random_state=0).fit(X, y)
    >>> regr.predict([[0, 0, 0, 0]])
    array([-2.8720...])

    References
    ----------
    .. [1] L. Breiman, "Pasting small votes for classification in large
           databases and on-line", Machine Learning, 36(1), 85-103, 1999.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2),
           123-140, 1996.
    .. [3] T. Ho, "The random subspace method for constructing decision
           forests", Pattern Analysis and Machine Intelligence, 20(8),
           832-844, 1998.
    .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
           Learning and Knowledge Discovery in Databases, 346-361, 2012.
    """

    def __init__(
        self,
        base_estimator=None,
        n_estimators=10,
        *,
        max_samples=1.0,
        max_features=1.0,
        bootstrap=True,
        bootstrap_features=False,
        oob_score=False,
        warm_start=False,
        n_jobs=None,
        random_state=None,
        verbose=0
    ):
        # All configuration is forwarded verbatim to BaseBagging.
        super().__init__(
            base_estimator,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            bootstrap=bootstrap,
            bootstrap_features=bootstrap_features,
            oob_score=oob_score,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
        )

    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as
        the mean predicted regression target of the estimators in the
        ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only
            if they are supported by the base estimator.

        Returns
        -------
        y : ndarray of shape (n_samples,)
            The predicted values.
        """
        check_is_fitted(self)

        # Validate X without resetting n_features_in_ (prediction time).
        X = self._validate_data(
            X,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
            reset=False,
        )

        # Partition the fitted estimators into chunks, one per job.
        n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)

        partial_sums = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_regression)(
                self.estimators_[starts[job] : starts[job + 1]],
                self.estimators_features_[starts[job] : starts[job + 1]],
                X,
            )
            for job in range(n_jobs)
        )

        # Each chunk returns summed predictions; average over the ensemble.
        return sum(partial_sums) / self.n_estimators

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super()._validate_estimator(default=DecisionTreeRegressor())

    def _set_oob_score(self, X, y):
        # Accumulate out-of-bag predictions per training sample together
        # with the number of estimators contributing to each sample.
        n_samples = y.shape[0]
        oob_sum = np.zeros((n_samples,))
        oob_count = np.zeros((n_samples,))

        for estimator, samples, features in zip(
            self.estimators_, self.estimators_samples_, self.estimators_features_
        ):
            # Samples NOT drawn for this estimator are its OOB samples.
            unsampled = ~indices_to_mask(samples, n_samples)
            oob_sum[unsampled] += estimator.predict((X[unsampled, :])[:, features])
            oob_count[unsampled] += 1

        if (oob_count == 0).any():
            warn(
                "Some inputs do not have OOB scores. "
                "This probably means too few estimators were used "
                "to compute any reliable oob estimates."
            )
            # Avoid division by zero for samples that were never OOB.
            oob_count[oob_count == 0] = 1

        oob_sum /= oob_count

        self.oob_prediction_ = oob_sum
        self.oob_score_ = r2_score(y, oob_sum)
| 34.993092
| 88
| 0.624253
|
acfda8bb598896a9ab9a025b66c0079dcc96e608
| 3,586
|
py
|
Python
|
utils/user_functions.py
|
aragilar/NewsBlur
|
64ecd83bf4cea175f1bdeeb6e475fd5cadb679c9
|
[
"MIT"
] | 2
|
2015-09-05T10:40:30.000Z
|
2017-03-05T12:31:21.000Z
|
utils/user_functions.py
|
aragilar/NewsBlur
|
64ecd83bf4cea175f1bdeeb6e475fd5cadb679c9
|
[
"MIT"
] | null | null | null |
utils/user_functions.py
|
aragilar/NewsBlur
|
64ecd83bf4cea175f1bdeeb6e475fd5cadb679c9
|
[
"MIT"
] | null | null | null |
import hashlib
from functools import wraps

from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.http import HttpResponseForbidden
from django.utils.http import urlquote
def ajax_login_required(function=None):
    """Decorator that returns 403 Forbidden when the requesting user is anonymous.

    Supports both bare usage (``@ajax_login_required``) and called usage
    (``@ajax_login_required()``), mirroring Django's decorator conventions.
    """
    def _dec(view_func):
        # functools.wraps replaces the manual copying of __name__,
        # __dict__ and __doc__ (and also preserves __module__ and
        # __wrapped__, which the hand-rolled version missed).
        @wraps(view_func)
        def _view(request, *args, **kwargs):
            if request.user.is_anonymous():
                return HttpResponseForbidden()
            return view_func(request, *args, **kwargs)
        return _view

    if function is None:
        return _dec
    return _dec(function)
def admin_only(function=None):
    """Decorator that returns 403 Forbidden unless the requesting user is staff.

    Supports both bare usage (``@admin_only``) and called usage
    (``@admin_only()``), mirroring Django's decorator conventions.
    """
    def _dec(view_func):
        # functools.wraps replaces the manual copying of __name__,
        # __dict__ and __doc__ (and also preserves __module__ and
        # __wrapped__, which the hand-rolled version missed).
        @wraps(view_func)
        def _view(request, *args, **kwargs):
            if not request.user.is_staff:
                return HttpResponseForbidden()
            return view_func(request, *args, **kwargs)
        return _view

    if function is None:
        return _dec
    return _dec(function)
def get_user(request):
    """Resolve a concrete user for *request*.

    Accepts either a request object (its ``.user`` is used) or a user
    object directly. Anonymous users are replaced by the shared homepage
    user, looked up in the cache, then in the database, and finally
    created on first use.
    """
    user = request.user if hasattr(request, 'user') else request
    if user.is_anonymous():
        cache_key = 'user:%s' % settings.HOMEPAGE_USERNAME
        user = cache.get(cache_key, None)
        if not user:
            try:
                user = User.objects.get(username=settings.HOMEPAGE_USERNAME)
                # BUG FIX: key the cache entry on the username, matching
                # the cache.get above. The original keyed on str(user),
                # which only matched incidentally because Django's
                # User.__str__ returns the username.
                cache.set(cache_key, user)
            except User.DoesNotExist:
                # First run: create the homepage user with a blank password.
                user = User.objects.create(username=settings.HOMEPAGE_USERNAME)
                user.set_password('')
                user.save()
    return user
def invalidate_template_cache(fragment_name, *variables):
    """Expire a Django template-fragment cache entry.

    Rebuilds the fragment cache key the same way Django's ``{% cache %}``
    tag does: url-quote the vary-on variables, join with ':' and md5 them.
    """
    hashed_vary_on = hashlib.md5(u':'.join(urlquote(var) for var in variables))
    cache.delete('template.cache.%s.%s' % (fragment_name, hashed_vary_on.hexdigest()))
def generate_secret_token(phrase, size=12):
    """Generate a (SHA1) security hash from the provided info.

    The digest mixes *phrase* with the project SECRET_KEY and is
    truncated to *size* hex characters.
    """
    digest = hashlib.sha1(phrase + settings.SECRET_KEY).hexdigest()
    return digest[:size]
def extract_user_agent(request):
    """Map the request's User-Agent header to a short platform code.

    Codes are at most 6 characters (hence abbreviations like 'Androd' and
    'ReadKt'); unknown agents yield the placeholder '------'.
    """
    user_agent = request.environ.get('HTTP_USER_AGENT', '')

    # Ordered (substring, code) pairs; order preserves the original
    # elif-chain precedence (e.g. Chrome before Safari).
    known_platforms = (
        ('iPad App', 'iPad'),
        ('iPhone App', 'iPhone'),
        ('Blar', 'Blar'),
        ('Android', 'Androd'),
        ('Metroblur', 'Metrob'),
        ('Pluggio', 'Plugio'),
        ('MSIE', 'IE'),
        ('Chrome', 'Chrome'),
        ('Safari', 'Safari'),
        ('MeeGo', 'MeeGo'),
        ('Firefox', 'FF'),
        ('Opera', 'Opera'),
        ('WP7', 'WP7'),
        ('WP8', 'WP8'),
        ('Tafiti', 'Tafiti'),
        ('ReadKit', 'ReadKt'),
    )

    for needle, code in known_platforms:
        if needle in user_agent:
            if needle == 'MSIE':
                # Append the major IE version for the releases we track;
                # '9' before '10' matches the original check order.
                for version in ('9', '10', '8'):
                    if 'MSIE %s' % version in user_agent:
                        code += version
                        break
            return code
    return '------'
| 30.649573
| 79
| 0.604852
|
acfda8df889e1d6c4f0e3a3f2bc9010e2e9780c3
| 21,733
|
py
|
Python
|
rfa_toolbox/graphs.py
|
MLRichter/receptive_field_analysis_toolbox
|
308c86ceb350925050ba3cad4931af09e36782ef
|
[
"MIT"
] | 77
|
2022-01-10T19:46:52.000Z
|
2022-03-22T11:03:22.000Z
|
rfa_toolbox/graphs.py
|
MLRichter/receptive_field_analysis_toolbox
|
308c86ceb350925050ba3cad4931af09e36782ef
|
[
"MIT"
] | 37
|
2021-12-10T19:08:31.000Z
|
2022-03-29T04:13:24.000Z
|
rfa_toolbox/graphs.py
|
MLRichter/receptive_field_analysis_toolbox
|
308c86ceb350925050ba3cad4931af09e36782ef
|
[
"MIT"
] | 4
|
2022-01-11T07:37:21.000Z
|
2022-01-14T09:05:30.000Z
|
from operator import attrgetter
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
import numpy as np
from attr import attrib, attrs
from rfa_toolbox.domain import Layer, Node
def receptive_field_provider_with_1x1_handling(
    node: "EnrichedNetworkNode",
) -> Optional[int]:
    """Provide the MINIMUM receptive field size of a layer, treating
    1x1-convolutions as having an infinite receptive field size.

    This provider is based on the hypothesis that 1x1 convolutions are
    always unproductive. It is worth noting that this hypothesis is still
    under investigation.

    Args:
        node: the node to receive the receptive field size from.

    Returns:
        The minimum receptive field size; infinite if the kernel size is 1.
    """
    # BUG FIX: EnrichedNetworkNode (the annotated argument type) exposes
    # the layer container as ``layer_info``; ``node.layer_type`` does not
    # exist on it and raised AttributeError.
    if node.layer_info.kernel_size > 1:
        return node.receptive_field_min
    return np.inf
def receptive_field_provider(node: "EnrichedNetworkNode") -> Optional[int]:
    """Provide the MINIMUM receptive field size from a node.

    Based on the result of https://arxiv.org/abs/2106.12307 this is
    currently the most reliable way of predicting unproductive layers.

    Args:
        node: the node to return the receptive field size for.

    Returns:
        The minimum receptive field size.
    """
    minimum_size = node.receptive_field_min
    return minimum_size
# FIXME: Make this function work for scenarios where the infos contain
# tuples of receptive field sizes describing an area.
def naive_minmax_filter(
    info: Tuple["ReceptiveFieldInfo"],
) -> Tuple["ReceptiveFieldInfo", "ReceptiveFieldInfo"]:
    """Keep only the receptive field infos with the minimum and maximum size.

    Currently only works if all receptive field sizes, kernel and stride
    sizes are scalar.

    Args:
        info: tuple of receptive field info containers to filter.

    Returns:
        A two-tuple of the minimum and maximum receptive field infos.
    """
    by_size = attrgetter("receptive_field")
    smallest: ReceptiveFieldInfo = min(info, key=by_size)
    largest: ReceptiveFieldInfo = max(info, key=by_size)
    return smallest, largest
def noop_filter(
    info: Tuple["ReceptiveFieldInfo", ...],
) -> Tuple["ReceptiveFieldInfo", ...]:
    """Identity filter: return the receptive field infos unchanged."""
    return info
def filter_all_rf_info_with_infinite_receptive_field(
    info: Tuple["ReceptiveFieldInfo", ...],
) -> Tuple["ReceptiveFieldInfo", ...]:
    """Drop infos whose receptive field is (partly) infinite.

    If every info is infinite, the input is returned unchanged so at
    least one info always survives.
    """
    # np.any(np.isinf(...)) handles both scalar and per-axis sizes.
    finite = tuple(
        rf_info
        for rf_info in info
        if not np.any(np.isinf(rf_info.receptive_field))
    )
    return finite if finite else info
# Registry of named receptive-field-info filters, selectable by key.
KNOWN_FILTER_MAPPING = {
    # "inf": drop infos that already carry an infinite receptive field.
    "inf": filter_all_rf_info_with_infinite_receptive_field,
    # None: keep every info (no filtering).
    None: noop_filter,
}
@attrs(auto_attribs=True, frozen=True, slots=True)
class ReceptiveFieldInfo:
    """Immutable container used during the successive receptive field
    size computation.

    Args:
        receptive_field: the receptive field size (scalar or per-axis).
        multiplicator: the current growth multiplicator, increased by
            stride sizes > 1.
    """

    # Sequences / ndarrays are normalized to tuples so instances stay
    # hashable (they are collected into sets).
    receptive_field: Union[int, Sequence[int]] = attrib(
        converter=lambda value: tuple(value)
        if isinstance(value, (Sequence, np.ndarray))
        else value
    )
    multiplicator: Union[int, Sequence[int]] = attrib(
        converter=lambda value: tuple(value)
        if isinstance(value, (Sequence, np.ndarray))
        else value
    )
@attrs(auto_attribs=True, frozen=True, slots=True)
class LayerDefinition(Layer):
    """The standard representation of a neural network layer.

    Contains the information needed for receptive field computation.

    Args:
        name: name of the layer
        kernel_size: size of the kernel; None marks a dense layer and is
            normalized to ``np.inf``.
        stride_size: the stride the kernel is convolved with; None is
            normalized to 1.
        filters: number of filters produced by the convolution operation
        units: number of units of a fully connected layer
    """

    name: str
    kernel_size: Optional[Union[int, Sequence[int]]] = attrib(
        converter=lambda x: np.inf if x is None else x, default=None
    )
    stride_size: Optional[Union[int, Sequence[int]]] = attrib(
        converter=lambda x: 1 if x is None else x, default=None
    )
    filters: Optional[int] = None
    units: Optional[int] = None

    @kernel_size.validator
    def validate_kernel_size(
        self, attribute: str, value: Union[int, Sequence[int]]
    ) -> None:
        """Recursively check that every kernel size component is >= 1."""
        if isinstance(value, Sequence):
            for v in value:
                self.validate_kernel_size(attribute, v)
        elif value is not None and value < 1:
            raise ValueError(
                f"{attribute} values must be greater than 0 or "
                f"infinite (which indicates a dense layer)"
            )

    @stride_size.validator
    def validate_stride_size(
        self, attribute: str, value: Union[int, Sequence[int]]
    ) -> None:
        """Recursively check that every stride size component is >= 1."""
        if isinstance(value, Sequence):
            for v in value:
                self.validate_stride_size(attribute, v)
        elif value is not None and value < 1:
            raise ValueError(
                f"{attribute} values must be greater than 0 or "
                f"infinite (which indicates a dense layer)"
            )

    def _check_consistency_for_kernel_and_stride_sequences(self) -> None:
        # When both are per-axis sequences they must describe the same
        # number of axes.  NOTE: the original followed this check with a
        # loop re-testing the identical condition; that loop could never
        # raise (the first check already had) and has been removed.
        if isinstance(self.kernel_size, Sequence) and isinstance(
            self.stride_size, Sequence
        ):
            if len(self.kernel_size) != len(self.stride_size):
                raise ValueError(
                    "kernel_size and stride_size must have the same length"
                )

    def __attrs_post_init__(self):
        self._check_consistency_for_kernel_and_stride_sequences()

    @classmethod
    def from_dict(cls, config) -> "LayerDefinition":
        """Create a layer definition from a dictionary of constructor args.

        Args:
            config: dictionary of keyword arguments for the constructor.

        Returns:
            A new instance of ``cls``.
        """
        # BUG FIX: use cls(**config) instead of hard-coding
        # LayerDefinition so subclasses round-trip correctly.
        return cls(**config)

    def to_dict(self) -> Dict[str, Union[int, str]]:
        """Create a json-serializable dictionary from this instance.

        NOTE(review): ``filters`` and ``units`` are not serialized here —
        confirm this asymmetry with from_dict is intended before relying
        on full round-trips.
        """
        return {
            "name": self.name,
            "kernel_size": self.kernel_size,
            "stride_size": self.stride_size,
        }
def compute_receptive_field_sizes(
    receptive_field_info: Set[ReceptiveFieldInfo], layer_info: Layer
) -> Tuple[ReceptiveFieldInfo]:
    """Compute the receptive field sizes for a node.

    Given the receptive field infos collected from predecessor nodes and
    the current layer information, grow each receptive field by
    (kernel - 1) * multiplicator and scale the multiplicator by the
    stride.

    Args:
        receptive_field_info: iterable of receptive field infos collected
            from predecessor layers.
        layer_info: the layer information container for the current layer.

    Returns:
        A tuple of updated ReceptiveFieldInfo instances for this layer.
    """
    # Hoist the layer-level constants out of the loop.
    kernel = np.asarray(layer_info.kernel_size)
    stride = np.asarray(layer_info.stride_size)

    updated: List[ReceptiveFieldInfo] = []
    for info in receptive_field_info:
        multiplicator = np.asarray(info.multiplicator)
        updated.append(
            ReceptiveFieldInfo(
                receptive_field=np.asarray(info.receptive_field)
                + (kernel - 1) * multiplicator,
                multiplicator=stride * multiplicator,
            )
        )
    return tuple(updated)
@attrs(auto_attribs=True, frozen=True, slots=True, hash=False, repr=False)
class EnrichedNetworkNode(Node):
    """The EnrichedNetworkNode is the core component of a network graph in
    this framework.

    Any node of a network can be used as a handle for the entire graph.
    A neural network is expected to have exactly one input and arbitrarily
    many outputs. Networks with multiple inputs may cause inconsistencies.

    Args:
        name: the name of the current node
        layer_info: the layer information container
        predecessors: a list of predecessor nodes, empty list by default.
        receptive_field_info_filter: function which filters the
            ReceptiveFieldInfo tuple to reduce the number of computations
            in networks with many pathways or skip connections.
            By default no filtering is applied (noop_filter).

    Params:
        receptive_field_info: an n-tuple holding the ReceptiveFieldInfo
            instances used for receptive field size computation
        receptive_field_min: minimum receptive field size
        receptive_field_max: maximum receptive field size
        receptive_field_sizes: all receptive field sizes (note that the
            configured filter is applied first)
        all_layers: a list of all nodes contained in the graph
        kernel_size: the size of the kernel, passthrough from the
            layer_info container
        stride_size: the stride size, passthrough from the layer_info
            container
    """

    name: str
    layer_info: LayerDefinition
    predecessors: List["EnrichedNetworkNode"] = attrib(converter=list)
    # NOTE(review): "succecessors" is a typo, but it is a public attribute
    # name; renaming it would break callers, so it is kept as-is.
    succecessors: List["EnrichedNetworkNode"] = attrib(
        init=False, factory=list, eq=False
    )
    # Derived fields, populated in __attrs_post_init__; the class is
    # frozen, so they are written via object.__setattr__.
    receptive_field_info: Tuple[ReceptiveFieldInfo] = attrib(init=False)
    receptive_field_min: int = attrib(init=False)
    receptive_field_max: int = attrib(init=False)
    receptive_field_info_filter: Callable[
        [Tuple[ReceptiveFieldInfo, ...]], Tuple[ReceptiveFieldInfo, ...]
    ] = noop_filter
    all_layers: List["EnrichedNetworkNode"] = attrib(init=False)

    @property
    def receptive_field_sizes(self) -> List[int]:
        # Sizes of the (already filtered) receptive field infos.
        return [elem.receptive_field for elem in self.receptive_field_info]

    def _group_by_dim(
        self, rf_sizes: List[Union[Sequence[int], int]]
    ) -> Dict[Union[int, str], List[int]]:
        """Group receptive field sizes by spatial dimension.

        Scalar sizes go into the "all" bucket (they apply to every axis);
        per-axis sizes are split into one bucket per axis index.

        Args:
            rf_sizes: a list of scalar or per-axis receptive field sizes.

        Returns:
            Mapping from axis index (or "all") to sizes for that axis.
        """
        if all(
            [
                isinstance(elem, int) and not isinstance(elem, Sequence)
                for elem in rf_sizes
            ]
        ):
            # Fast path: only scalar (int) sizes present.
            return {"all": rf_sizes}
        else:
            result: Dict[Union[int, str], List[int]] = {"all": []}
            for rf_size in rf_sizes:
                if isinstance(rf_size, Sequence):
                    for i, size in enumerate(rf_size):
                        if i not in result:
                            result[i] = []
                        result[i].append(size)
                else:
                    # Non-sequence, non-int sizes (e.g. floats such as
                    # np.inf) also land in the "all" bucket.
                    result["all"].append(rf_size)
            return result

    @staticmethod
    def _apply_function_on_receptive_field_groups(
        groups: Dict[Union[int, str], List[int]], func: Callable[[List[int]], int]
    ) -> Union[Sequence[int], int]:
        """Apply a reduction on grouped receptive field sizes.

        Args:
            groups: receptive field sizes grouped by axis (see
                _group_by_dim); must contain the "all" key.
            func: the reduction to apply (e.g. min or max).

        Returns:
            The reduced value: a scalar if only scalars were present,
            otherwise one value per axis as a tuple.

        Raises:
            ValueError: if "all" is missing or an axis index is missing.
        """
        if "all" in groups:
            scalars: List[int] = groups.pop("all")
            # Only scalars present: reduce them directly.
            if len(groups) == 0:
                return func(scalars)
        else:
            raise ValueError(
                "'all'-key not in sequence for receptive field computation"
            )
        result: List[int] = []
        max_dim: int = max(groups.keys())
        # Reduce each axis together with the scalar sizes (scalars apply
        # to every axis); axis indices must be contiguous 0..max_dim.
        for i in range(max_dim + 1):
            if i not in groups:
                raise ValueError(f"Missing dimension {i}")
            dim: List[int] = groups[i] + scalars
            result.append(func(dim))
        return tuple(result)

    def _apply_function_on_receptive_field_sizes(
        self, func: Callable[[List[int]], int]
    ) -> Union[Sequence[int], int]:
        """Apply *func* to this node's grouped receptive field sizes."""
        return self._apply_function_on_receptive_field_groups(
            self._group_by_dim(self.receptive_field_sizes), func
        )

    def _scale_factors(self) -> Union[int, Sequence[int]]:
        # Cumulative stride products recorded per receptive-field path.
        return [elem.multiplicator for elem in self.receptive_field_info]

    def _apply_function_on_multiplicator(
        self, func: Callable[[List[int]], int]
    ) -> Union[Sequence[int], int]:
        """Apply *func* to the multiplicators of the receptive field growth."""
        return self._apply_function_on_receptive_field_groups(
            self._group_by_dim(self._scale_factors()), func
        )

    def _receptive_field_min(self):
        # Smallest receptive field over all surviving paths (0 if empty).
        return self._apply_function_on_receptive_field_sizes(
            lambda x: min(x, default=0)
        )

    def _receptive_field_max(self):
        # Largest receptive field over all surviving paths (0 if empty).
        return self._apply_function_on_receptive_field_sizes(
            lambda x: max(x, default=0)
        )

    @property
    def kernel_size(self):
        # Passthrough from the layer definition.
        return self.layer_info.kernel_size

    @property
    def stride_size(self):
        # Passthrough from the layer definition.
        return self.layer_info.stride_size

    @predecessors.validator
    def verify_predecessor_list(
        self, attribute: str, value: List["EnrichedNetworkNode"]
    ) -> None:
        # Every predecessor must itself be an enriched node.
        if len(value) != 0:
            if not all([isinstance(node, EnrichedNetworkNode) for node in value]):
                raise ValueError(f"{attribute} must be a list of EnrichedNetworkNodes")

    def __attrs_post_init__(self):
        # Seed the receptive field propagation:
        #  - dense layers (kernel == inf) start from an infinite RF,
        #  - inner nodes inherit the infos of all predecessors,
        #  - input nodes (no predecessors) start with RF 1, multiplier 1.
        infos: Set[ReceptiveFieldInfo] = set()
        if self.layer_info.kernel_size == np.inf:
            infos.update([ReceptiveFieldInfo(receptive_field=np.inf, multiplicator=1)])
        elif len(self.predecessors):
            for pred in self.predecessors:
                infos.update(pred.receptive_field_info)
        else:
            infos.update([ReceptiveFieldInfo(receptive_field=1, multiplicator=1)])
        rf_infos = compute_receptive_field_sizes(infos, self.layer_info)
        rf_infos_filtered = self.receptive_field_info_filter(rf_infos)
        # Frozen attrs class: derived fields are set via object.__setattr__.
        object.__setattr__(
            self,
            "receptive_field_info",
            rf_infos_filtered,
        )
        object.__setattr__(self, "receptive_field_min", self._receptive_field_min())
        object.__setattr__(self, "receptive_field_max", self._receptive_field_max())
        # The node registry (all_layers) is SHARED with the predecessors'
        # graph; an input node starts a fresh registry.
        object.__setattr__(
            self,
            "all_layers",
            [] if not self.predecessors else self.predecessors[0].all_layers,
        )
        self.all_layers.append(self)
        # Back-link this node on each predecessor.
        for pred in self.predecessors:
            pred.succecessors.append(self)

    def get_maximum_scale_factor(self) -> Union[int, Sequence[int]]:
        # Largest cumulative stride product (per axis if applicable).
        return self._apply_function_on_multiplicator(lambda x: max(x, default=0))

    def compute_feature_map_size(self, input_resolution: Union[int, Sequence[int]]):
        """Compute the feature map size.

        Integer-divides the input resolution by the maximum cumulative
        stride (scale factor) of this node.

        Args:
            input_resolution: the input resolution (scalar or per-axis).

        Returns:
            The feature map size.
        """
        scale_factor = self.get_maximum_scale_factor()
        return np.asarray(input_resolution) // np.asarray(scale_factor)

    def _feature_map_size_larger_than_kernel(
        self, pred: "EnrichedNetworkNode", input_resolution: int
    ):
        # True when pred's feature map is no larger than this layer's
        # kernel, i.e. a single kernel application covers the whole map.
        return np.all(
            np.asarray(
                pred.compute_feature_map_size(input_resolution)
                <= np.asarray(self.layer_info.kernel_size)
            )
        )

    def is_border(
        self,
        input_resolution: Union[int, Sequence[int]],
        receptive_field_provider: Callable[
            ["EnrichedNetworkNode"], Union[float, int]
        ] = receptive_field_provider,
        filter_kernel_size_1: bool = False,
    ) -> bool:
        """Check if this layer is a border layer.

        A border layer is predicted not to advance the intermediate
        solution quality and can thus be considered "dead weight".

        Args:
            input_resolution: the input resolution to check for.
            receptive_field_provider: a provider function that produces a
                receptive field value, from which the border-layer
                decision can be derived. By default the minimum receptive
                field size is used, which is currently the most reliable
                way of predicting unproductive layers.
            filter_kernel_size_1: if set, a layer with kernel size 1 is
                never classified as a border layer.

        Returns:
            True if this layer is predicted to be unproductive for the
            given input resolution, else False.
        """
        # A border layer receives all inputs with a receptive field size
        # at least as large as the input resolution...
        direct_predecessors = [
            np.all(
                np.asarray(input_resolution)
                <= np.asarray(receptive_field_provider(pred))
            )
            for pred in self.predecessors
        ]
        # ...and the layer itself must (strictly) exceed the resolution.
        own = np.all(
            np.asarray(input_resolution) < np.asarray(self.receptive_field_min)
        )
        if self.layer_info.kernel_size != np.inf:
            # Convolutional layer whose kernel covers every predecessor's
            # entire feature map effectively acts fully connected and is
            # immediately classified as a border layer.
            behaves_like_fully_connected = np.asarray(
                [
                    self._feature_map_size_larger_than_kernel(pred, input_resolution)
                    for pred in self.predecessors
                ]
            )
            if len(behaves_like_fully_connected) != 0 and np.all(
                behaves_like_fully_connected
            ):
                return True
        # Optionally exempt pure 1x1 layers from the border classification
        # (scalar and per-axis kernel sizes handled separately).
        can_be_border = not (
            filter_kernel_size_1
            and (
                (
                    isinstance(self.layer_info.kernel_size, Sequence)
                    and all(np.asarray(self.layer_info.kernel_size) == 1)
                )
                or (
                    isinstance(self.layer_info.kernel_size, int)
                    and self.kernel_size == 1
                )
            )
        )
        # All direct predecessors AND the layer itself must satisfy the
        # receptive-field criterion.
        return all(direct_predecessors) and own and can_be_border

    def is_in(self, container: Union[List[Node], Dict[Node, Any]]) -> bool:
        """Check if this node is inside an iterable collection.

        Membership is decided by object identity, not equality.

        Args:
            container: dictionary with nodes as keys or a list of
                EnrichedNetworkNodes.

        Returns:
            True if the node is contained in the collection, else False.
        """
        if isinstance(container, list):
            return any(id(self) == id(node) for node in container)
        else:
            return any(id(self) == id(node) for node in container.keys())

    def __repr__(self):
        # Compact multi-line representation listing neighbours by name.
        pred_names = [pred.name for pred in self.predecessors]
        succ_names = [succ.name for succ in self.succecessors]
        return (
            f"EnrichedNetworkNode(\n"
            f"\tname={self.name},\n"
            f"\tpredecessors={pred_names},\n"
            f"\tsuccessors={succ_names},\n"
            f"\tlayer_info={self.layer_info},\n"
            f"\treceptive_field_min={self.receptive_field_min},\n"
            f"\treceptive_field_max={self.receptive_field_max},\n"
            f"\treceptive_field_sizes={self.receptive_field_sizes},\n"
            f")\n"
        )
| 36.649241
| 90
| 0.611558
|
acfda95c46e1346dfcc28d340e4ff72cece18afb
| 3,104
|
py
|
Python
|
maml_rl/policies/normal_mlp_a2c.py
|
YingyingF/pytorch-maml-rl
|
b0b5e27ac8e136ddfeffd8cbaa707c7ea529f6b6
|
[
"MIT"
] | null | null | null |
maml_rl/policies/normal_mlp_a2c.py
|
YingyingF/pytorch-maml-rl
|
b0b5e27ac8e136ddfeffd8cbaa707c7ea529f6b6
|
[
"MIT"
] | null | null | null |
maml_rl/policies/normal_mlp_a2c.py
|
YingyingF/pytorch-maml-rl
|
b0b5e27ac8e136ddfeffd8cbaa707c7ea529f6b6
|
[
"MIT"
] | 1
|
2018-12-02T22:36:36.000Z
|
2018-12-02T22:36:36.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from collections import OrderedDict
from maml_rl.policies.policy import Policy, weight_init
class NormalMLPPolicyA2C(Policy):
    """Policy network based on a multi-layer perceptron (MLP), with a
    `Normal` distribution output, with trainable standard deviation. This
    policy network can be used on tasks with continuous action spaces (eg.
    `HalfCheetahDir`). The code is adapted from
    https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/sandbox/rocky/tf/policies/maml_minimal_gauss_mlp_policy.py

    This A2C variant additionally carries a linear value head alongside the
    Gaussian policy head.
    """
    def __init__(self, input_size, output_size, hidden_sizes=(),
                 nonlinearity=F.relu, init_std=1.0, min_std=1e-6):
        # BUGFIX: super() previously referenced `NormalMLPPolicy`, a name that
        # does not exist in this module, raising NameError on instantiation.
        super(NormalMLPPolicyA2C, self).__init__(input_size=input_size,
                                                 output_size=output_size)
        self.hidden_sizes = hidden_sizes
        self.nonlinearity = nonlinearity
        self.min_log_std = math.log(min_std)
        self.num_layers = len(hidden_sizes) + 3
        layer_sizes = (input_size,) + hidden_sizes
        # Hidden layers. BUGFIX: each layer was registered twice; the second
        # add_module call overwrote the first with a freshly-initialized
        # nn.Linear.
        for i in range(1, self.num_layers - 2):
            self.add_module('layer{0}'.format(i),
                            nn.Linear(layer_sizes[i - 1], layer_sizes[i]))
        # Gaussian policy head: mean layer plus a trainable log-std parameter.
        self.mu = nn.Linear(layer_sizes[-1], output_size)
        self.sigma = nn.Parameter(torch.Tensor(output_size))
        self.sigma.data.fill_(math.log(init_std))
        self.apply(weight_init)
        # NOTE(review): the value head is created after apply(weight_init), so
        # it keeps nn.Linear's default init — confirm this is intentional.
        self.value = nn.Linear(layer_sizes[-1], 1)

    def forward(self, input, params=None):
        """Return (policy distribution, state value) for `input`.

        Args:
            input: batch of observations.
            params: optional OrderedDict of parameters to use in place of the
                module's own (MAML-style adapted parameters).
        Returns:
            tuple: (torch.distributions.Normal action distribution,
            torch.Tensor value estimate).
        """
        if params is None:
            params = OrderedDict(self.named_parameters())
        output = input
        for i in range(1, self.num_layers - 2):
            output = F.linear(output,
                              weight=params['layer{0}.weight'.format(i)],
                              bias=params['layer{0}.bias'.format(i)])
            output = self.nonlinearity(output)
        value = F.linear(output, weight=params['value.weight'],
                         bias=params['value.bias'])
        mu = F.linear(output, weight=params['mu.weight'],
                      bias=params['mu.bias'])
        scale = torch.exp(torch.clamp(params['sigma'], min=self.min_log_std))
        policy = Normal(loc=mu, scale=scale)
        return policy, value
| 47.030303
| 152
| 0.645941
|
acfdab66cb4596a032ae06a2a70da159e1e54a9d
| 20,224
|
py
|
Python
|
src/rez/utils/filesystem.py
|
alexey-pelykh/rez
|
ad12105d89d658e4d2ea9249e537b3de90391f0e
|
[
"Apache-2.0"
] | null | null | null |
src/rez/utils/filesystem.py
|
alexey-pelykh/rez
|
ad12105d89d658e4d2ea9249e537b3de90391f0e
|
[
"Apache-2.0"
] | null | null | null |
src/rez/utils/filesystem.py
|
alexey-pelykh/rez
|
ad12105d89d658e4d2ea9249e537b3de90391f0e
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
Filesystem-related utilities.
"""
from __future__ import print_function
from threading import Lock
from tempfile import mkdtemp
from contextlib import contextmanager
from uuid import uuid4
import errno
import weakref
import atexit
import posixpath
import ntpath
import os.path
import shutil
import os
import re
import stat
import platform
import uuid
from rez.vendor.six import six
from rez.utils.platform_ import platform_
is_windows = platform.system() == "Windows"
class TempDirs(object):
    """Tempdir manager.
    Creates temporary directories and guarantees they are removed at program
    exit (via the module-level ``atexit`` hook).
    """
    instances_lock = Lock()
    instances = []

    def __init__(self, tmpdir, prefix="rez_"):
        """
        Args:
            tmpdir (str): Parent directory for all created tempdirs.
            prefix (str): Filename prefix for created tempdirs.
        """
        self.tmpdir = tmpdir
        self.prefix = prefix
        self.dirs = set()
        self.lock = Lock()
        with TempDirs.instances_lock:
            TempDirs.instances.append(weakref.ref(self))

    def mkdtemp(self, cleanup=True):
        """Create a new tempdir; track it for cleanup unless told otherwise."""
        new_dir = mkdtemp(dir=self.tmpdir, prefix=self.prefix)
        if cleanup:
            with self.lock:
                self.dirs.add(new_dir)
        return new_dir

    def __del__(self):
        self.clear()

    def clear(self):
        """Delete every tracked tempdir (unless $REZ_KEEP_TMPDIRS is set)."""
        with self.lock:
            pending, self.dirs = self.dirs, set()
            if not pending:
                return
            for tracked in pending:
                if os.path.exists(tracked) and not os.getenv("REZ_KEEP_TMPDIRS"):
                    shutil.rmtree(tracked)

    @classmethod
    def clear_all(cls):
        with TempDirs.instances_lock:
            refs = cls.instances[:]
        for ref in refs:
            manager = ref()
            if manager is not None:
                manager.clear()
# Remove all tracked tempdirs when the interpreter exits.
atexit.register(TempDirs.clear_all)
@contextmanager
def make_path_writable(path):
    """Temporarily make `path` writable, if possible.
    Args:
        path (str): Path to make temporarily writable
    """
    orig_mode = None
    new_mode = None
    try:
        orig_mode = os.stat(path).st_mode
        new_mode = orig_mode
        if not os.access(path, os.W_OK):
            new_mode = orig_mode | stat.S_IWUSR
        if new_mode != orig_mode:
            os.chmod(path, new_mode)
    except OSError:
        # Access errors are deliberately ignored; it is more intuitive for
        # the calling code to fail at the point of actual use.
        orig_mode = None
        new_mode = None
    try:
        yield
    finally:
        # restore the original permissions, but only if we changed them
        if new_mode != orig_mode:
            os.chmod(path, orig_mode)
@contextmanager
def retain_cwd():
    """Context manager that restores the original working directory on exit.
    """
    saved = os.getcwd()
    try:
        yield
    finally:
        os.chdir(saved)
def get_existing_path(path, topmost_path=None):
    """Get the longest parent path in `path` that exists.
    If `path` exists, it is returned.
    Args:
        path (str): Path to test
        topmost_path (str): Do not test this path or above
    Returns:
        str: Existing path, or None if no path was found.
    """
    if topmost_path:
        topmost_path = os.path.normpath(topmost_path)
    current = path
    while True:
        if os.path.exists(current):
            return current
        parent = os.path.dirname(current)
        if parent == current:
            # reached the filesystem root without finding anything
            return None
        if topmost_path and os.path.normpath(parent) == topmost_path:
            return None
        current = parent
def safe_listdir(path):
    """Safe listdir.
    Works in a multithread/proc scenario where dirs may be deleted at any time
    """
    try:
        return os.listdir(path)
    except OSError as e:
        # missing or non-dir paths simply yield no entries
        if e.errno not in (errno.ENOENT, errno.ENOTDIR):
            raise
        return []
def safe_makedirs(path):
    """Safe makedirs.
    Works in a multithreaded scenario.
    """
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError:
        # another thread/process may have created it in the meantime
        if not os.path.exists(path):
            raise
def safe_remove(path):
    """Safely remove the given file or directory.
    Works in a multithreaded scenario.
    """
    if not os.path.exists(path):
        return
    try:
        # symlinks (even to dirs) are removed as files
        if os.path.islink(path) or not os.path.isdir(path):
            os.remove(path)
        else:
            shutil.rmtree(path)
    except OSError:
        # tolerate another thread/process having removed it already
        if os.path.exists(path):
            raise
def forceful_rmtree(path):
    """Like shutil.rmtree, but may change permissions.
    Specifically, non-writable dirs within `path` can cause rmtree to fail. This
    func chmod's to writable to avoid this issue, if possible.
    Also handled:
    * path length over 259 char (on Windows)
    * unicode path
    """
    if six.PY2:
        path = unicode(path)  # noqa: py2-only builtin

    def _ensure_writable(target):
        if not os.access(target, os.W_OK):
            st = os.stat(target)
            os.chmod(target, st.st_mode | stat.S_IWUSR)

    def _on_error(func, errpath, exc_info):
        try:
            if is_windows:
                errpath = windows_long_path(errpath)
            _ensure_writable(os.path.dirname(errpath))
            _ensure_writable(errpath)
        except:
            # avoid confusion by ensuring original exception is reraised
            pass
        func(errpath)

    shutil.rmtree(path, onerror=_on_error)
def replacing_symlink(source, link_name):
    """Create symlink that overwrites any existing target.
    """
    # build the link under a temp name, then atomically swap it into place
    with make_tmp_name(link_name) as staged:
        os.symlink(source, staged)
        replace_file_or_dir(link_name, staged)
def replacing_copy(src, dest, follow_symlinks=False):
    """Perform copy that overwrites any existing target.
    Will copy/copytree `src` to `dest`, and will remove `dest` if it exists,
    regardless of what it is.
    If `follow_symlinks` is False, symlinks are preserved, otherwise their
    contents are copied.
    Note that this behavior is different to `shutil.copy`, which copies src
    into dest if dest is an existing dir.
    """
    with make_tmp_name(dest) as staged:
        if os.path.islink(src) and not follow_symlinks:
            # special case - recreate just the symlink itself
            os.symlink(os.readlink(src), staged)
        elif os.path.isdir(src):
            # directory copy
            shutil.copytree(src, staged, symlinks=(not follow_symlinks))
        else:
            # plain file copy
            shutil.copy2(src, staged)
        replace_file_or_dir(dest, staged)
def replace_file_or_dir(dest, source):
    """Replace `dest` with `source`.
    Acts like an `os.rename` if `dest` does not exist. Otherwise, `dest` is
    deleted and `src` is renamed to `dest`.
    """
    # local import: atomicwrites is vendored and only needed here
    from rez.vendor.atomicwrites import replace_atomic
    if not os.path.exists(dest):
        try:
            os.rename(source, dest)
            return
        except:
            # bare except is deliberate: a racing writer may have created
            # `dest`; only re-raise if it still doesn't exist
            if not os.path.exists(dest):
                raise
    try:
        # preferred path: atomic replacement
        replace_atomic(source, dest)
        return
    except:
        # deliberate best-effort fallthrough to the non-atomic swap below
        pass
    # non-atomic fallback: move old dest aside (tmp name is auto-removed),
    # then rename source into place
    with make_tmp_name(dest) as tmp_dest:
        os.rename(dest, tmp_dest)
        os.rename(source, dest)
def additive_copytree(src, dst, symlinks=False, ignore=None):
    """Version of `copytree` that merges into an existing directory.
    """
    if not os.path.exists(dst):
        os.makedirs(dst)
    for entry in os.listdir(src):
        src_entry = os.path.join(src, entry)
        dst_entry = os.path.join(dst, entry)
        if os.path.isdir(src_entry):
            additive_copytree(src_entry, dst_entry, symlinks, ignore)
        else:
            shutil.copy2(src_entry, dst_entry)
@contextmanager
def make_tmp_name(name):
    """Generates a tmp name for a file or dir.
    This is a tempname that sits in the same dir as `name`. If it exists on
    disk at context exit time, it is deleted.
    """
    dirname, basename = os.path.split(name)
    # there's a reason this isn't a hidden file:
    # https://github.com/nerdvegas/rez/pull/1088
    #
    tmp_name = os.path.join(dirname, "_tmp-%s-%s" % (basename, uuid4().hex))
    try:
        yield tmp_name
    finally:
        safe_remove(tmp_name)
def is_subdirectory(path_a, path_b):
"""Returns True if `path_a` is a subdirectory of `path_b`."""
path_a = os.path.realpath(path_a)
path_b = os.path.realpath(path_b)
try:
relative = os.path.relpath(path_a, path_b)
except ValueError:
# Different mounts on Windows:
# ValueError: path is on mount 'c:', start on mount 'd:'
#
return False
return not relative.startswith(os.pardir + os.sep)
def find_matching_symlink(path, source):
    """Find a symlink under `path` that points at `source`.
    If source is relative, it is considered relative to `path`.
    Returns:
        str: Name of symlink found, or None.
    """
    def _absolute(target):
        if os.path.isabs(target):
            return target
        return os.path.normpath(os.path.join(path, target))

    wanted = _absolute(source)
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.islink(full) and _absolute(os.readlink(full)) == wanted:
            return entry
    return None
def copy_or_replace(src, dst):
    '''try to copy with mode, and if it fails, try replacing
    '''
    try:
        shutil.copy(src, dst)
        return
    except (OSError, IOError) as e:
        # shutil.copy can fail copying permissions when the destination
        # exists but is owned by someone else; if we have write perms on the
        # directory we can delete and replace instead
        if e.errno != errno.EPERM:
            raise
    # copy into a temporary location beside the old file - if we have perms
    # to do that, we should also have perms to delete the old file and move
    # the new one into place
    #
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    dst_dir, dst_name = os.path.split(dst)
    dst_temp = os.path.join(dst_dir, ".%s.%s" % (uuid.uuid4().hex, dst_name))
    shutil.copy(src, dst_temp)
    if not os.path.isfile(dst_temp):
        raise RuntimeError(
            "shutil.copy completed successfully, but path"
            " '%s' still did not exist" % dst_temp
        )
    os.remove(dst)
    shutil.move(dst_temp, dst)
def copytree(src, dst, symlinks=False, ignore=None, hardlinks=False):
    '''copytree that supports hard-linking

    Args:
        src (str): Source directory.
        dst (str): Destination directory (created if needed).
        symlinks (bool): If True, recreate symlinks instead of copying their
            targets.
        ignore (callable): `shutil.ignore_patterns`-style callable.
        hardlinks (bool): If True, attempt to hard-link files, falling back
            to a normal copy if linking fails (eg across filesystems).

    Raises:
        shutil.Error: Aggregated list of (src, dst, reason) failures.
    '''
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()
    if hardlinks:
        def copy(srcname, dstname):
            try:
                # try hard-linking first
                os.link(srcname, dstname)
            except OSError:
                shutil.copy2(srcname, dstname)
    else:
        copy = shutil.copy2
    if not os.path.isdir(dst):
        os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                # BUGFIX: propagate `hardlinks` into the recursion; it was
                # previously dropped, so subdirectory files were never linked
                copytree(srcname, dstname, symlinks, ignore, hardlinks)
            else:
                copy(srcname, dstname)
            # XXX What about devices, sockets etc.?
        except (IOError, os.error) as why:
            errors.append((srcname, dstname, str(why)))
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except shutil.Error as err:
            errors.extend(err.args[0])
    try:
        shutil.copystat(src, dst)
    except OSError as why:
        # BUGFIX: `except shutil.WindowsError` raised AttributeError on
        # non-Windows Python 3; WindowsError (where defined) subclasses
        # OSError, so catch OSError and test for it dynamically
        win_error = getattr(shutil, "WindowsError", None)
        if win_error is not None and isinstance(why, win_error):
            # can't copy file access times on Windows
            pass
        else:
            # BUGFIX: was errors.extend(tuple), which flattened the triple
            # into three separate entries
            errors.append((src, dst, str(why)))
    if errors:
        raise shutil.Error(errors)
def movetree(src, dst):
    """Attempts a move, and falls back to a copy+delete if this fails
    """
    try:
        shutil.move(src, dst)
    except:
        # eg a cross-device move; copy everything then delete the source
        copytree(src, dst, symlinks=True, hardlinks=True)
        shutil.rmtree(src)
def safe_chmod(path, mode):
    """Set the permissions mode on path, but only if it differs from the current mode.
    """
    current = stat.S_IMODE(os.stat(path).st_mode)
    if current != mode:
        os.chmod(path, mode)
def to_nativepath(path):
    """Convert `path` to use the current platform's separators."""
    return os.path.join(*path.replace('\\', '/').split('/'))
def to_ntpath(path):
    """Convert forward slashes in `path` to backslashes."""
    return path.replace(posixpath.sep, ntpath.sep)
def to_posixpath(path):
    """Convert backslashes in `path` to forward slashes."""
    return path.replace(ntpath.sep, posixpath.sep)
def canonical_path(path, platform=None):
    """ Resolves symlinks, and formats filepath.
    Resolves symlinks, lowercases if filesystem is case-insensitive,
    formats filepath using slashes appropriate for platform.
    Args:
        path (str): Filepath being formatted
        platform (rez.utils.platform_.Platform): Indicates platform path is being
            formatted for. Defaults to current platform.
    Returns:
        str: Provided path, formatted for platform.
    """
    plat = platform_ if platform is None else platform
    resolved = os.path.normpath(os.path.realpath(path))
    if plat.has_case_sensitive_filesystem:
        return resolved
    return resolved.lower()
def encode_filesystem_name(input_str):
    """Encodes an arbitrary unicode string to a generic filesystem-compatible
    non-unicode filename.
    The result after encoding will only contain the standard ascii lowercase
    letters (a-z), the digits (0-9), or periods, underscores, or dashes
    (".", "_", or "-"). No uppercase letters will be used, for
    comaptibility with case-insensitive filesystems.
    The rules for the encoding are:
    1) Any lowercase letter, digit, period, or dash (a-z, 0-9, ., or -) is
    encoded as-is.
    2) Any underscore is encoded as a double-underscore ("__")
    3) Any uppercase ascii letter (A-Z) is encoded as an underscore followed
    by the corresponding lowercase letter (ie, "A" => "_a")
    4) All other characters are encoded using their UTF-8 encoded unicode
    representation, in the following format: "_NHH..., where:
    a) N represents the number of bytes needed for the UTF-8 encoding,
    except with N=0 for one-byte representation (the exception for N=1
    is made both because it means that for "standard" ascii characters
    in the range 0-127, their encoding will be _0xx, where xx is their
    ascii hex code; and because it mirrors the ways UTF-8 encoding
    itself works, where the number of bytes needed for the character can
    be determined by counting the number of leading "1"s in the binary
    representation of the character, except that if it is a 1-byte
    sequence, there are 0 leading 1's).
    b) HH represents the bytes of the corresponding UTF-8 encoding, in
    hexadecimal (using lower-case letters)
    As an example, the character "*", whose (hex) UTF-8 representation
    of 2A, would be encoded as "_02a", while the "euro" symbol, which
    has a UTF-8 representation of E2 82 AC, would be encoded as
    "_3e282ac". (Note that, strictly speaking, the "N" part of the
    encoding is redundant information, since it is essentially encoded
    in the UTF-8 representation itself, but it makes the resulting
    string more human-readable, and easier to decode).
    As an example, the string "Foo_Bar (fun).txt" would get encoded as:
    _foo___bar_020_028fun_029.txt
    """
    # BUGFIX: this previously called the py2-only `unicode()` builtin, which
    # raises NameError on Python 3. `type(u"")` is unicode on py2, str on py3.
    text_type = type(u"")
    if isinstance(input_str, text_type):
        pass
    elif isinstance(input_str, bytes):
        # mirrors py2 `unicode(str)`, which decodes using the ascii codec
        input_str = input_str.decode("ascii")
    else:
        raise TypeError("input_str must be a string")
    as_is = u'abcdefghijklmnopqrstuvwxyz0123456789.-'
    uppercase = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    result = []
    for char in input_str:
        if char in as_is:
            result.append(char)
        elif char == u'_':
            result.append('__')
        elif char in uppercase:
            result.append('_%s' % char.lower())
        else:
            utf8 = char.encode('utf8')
            N = len(utf8)
            if N == 1:
                N = 0
            # BUGFIX: iterating `bytes` yields str on py2 but int on py3;
            # bytearray yields ints on both, so `ord()` is no longer needed
            HH = ''.join('%x' % b for b in bytearray(utf8))
            result.append('_%d%s' % (N, HH))
    return ''.join(result)
# Token grammar for decode_filesystem_name: an as-is character, an escaped
# underscore ("__"), "_<lower>" (an uppercase letter), or "_<N>" introducing
# a hex-encoded UTF-8 byte sequence.
_FILESYSTEM_TOKEN_RE = re.compile(r'(?P<as_is>[a-z0-9.-])|(?P<underscore>__)|_(?P<uppercase>[a-z])|_(?P<N>[0-9])')
# Validates that an escape's payload is pure lowercase hex.
_HEX_RE = re.compile('[0-9a-f]+$')
def decode_filesystem_name(filename):
    """Decodes a filename encoded using the rules given in encode_filesystem_name
    to a unicode string.

    Raises:
        ValueError: If `filename` is not a valid encoding.
    """
    # BUGFIX: the original used the py2-only `unicode` and `xrange` names
    # (NameError on Python 3), shadowed the builtin `bytes` with a local, and
    # decoded the hex payload by eval'ing a constructed bytes literal, which
    # was both fragile and unsafe. bytearray.fromhex covers all of that.
    text_type = type(u"")  # unicode on py2, str on py3
    result = []
    remain = filename
    i = 0
    while remain:
        # use match, to ensure it matches from the start of the string...
        match = _FILESYSTEM_TOKEN_RE.match(remain)
        if not match:
            raise ValueError("incorrectly encoded filesystem name %r"
                             " (bad index: %d - %r)" % (filename, i,
                                                        remain[:2]))
        match_str = match.group(0)
        match_len = len(match_str)
        i += match_len
        remain = remain[match_len:]
        match_dict = match.groupdict()
        if match_dict['as_is']:
            result.append(text_type(match_str))
        elif match_dict['underscore']:
            result.append(u'_')
        elif match_dict['uppercase']:
            result.append(text_type(match_dict['uppercase'].upper()))
        elif match_dict['N']:
            num_bytes = int(match_dict['N'])
            if num_bytes == 0:
                num_bytes = 1
            # hex-encoded, so need to grab 2*N chars
            hex_len = 2 * num_bytes
            i += hex_len
            hex_str = remain[:hex_len]
            remain = remain[hex_len:]
            if not _HEX_RE.match(hex_str):
                raise ValueError("Bad utf8 encoding in name %r"
                                 " (bad index: %d - %r)" % (filename, i,
                                                            hex_str))
            result.append(bytearray.fromhex(hex_str).decode('utf8'))
        else:
            raise ValueError("Unrecognized match type in filesystem name %r"
                             " (bad index: %d - %r)" % (filename, i,
                                                        remain[:2]))
    return u''.join(result)
def test_encode_decode():
    """Round-trip sanity check for encode/decode_filesystem_name."""
    def _check(original, expected_encoded):
        print('=' * 80)
        print(original)
        encoded = encode_filesystem_name(original)
        print(encoded)
        assert encoded == expected_encoded
        decoded = decode_filesystem_name(encoded)
        print(decoded)
        assert decoded == original

    _check("Foo_Bar (fun).txt", '_foo___bar_020_028fun_029.txt')
    # u'\u20ac' == Euro symbol
    _check(u"\u20ac3 ~= $4.06", '_3e282ac3_020_07e_03d_020_0244.06')
def walk_up_dirs(path):
    """Yield absolute directories starting with the given path, then each of
    its parents in turn, stopping once a root directory is reached."""
    current = os.path.abspath(path)
    parent = None
    while current != parent:
        yield current
        parent = current
        current = os.path.dirname(current)
def windows_long_path(dos_path):
    """Prefix '\\?\' for path longer than 259 char (Win32API limitation)
    """
    abs_path = os.path.abspath(dos_path)
    if abs_path.startswith("\\\\?\\"):
        # already in long-path form
        return abs_path
    if abs_path.startswith("\\\\"):
        # UNC path: \\server\share -> \\?\UNC\server\share
        return "\\\\?\\UNC\\" + abs_path[2:]
    return "\\\\?\\" + abs_path
| 28.891429
| 114
| 0.606705
|
acfdac5451f4a4b5b09c15a64f0601973e0001d2
| 588,360
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20190401/outputs.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_native/network/v20190401/outputs.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_native/network/v20190401/outputs.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = [
'AddressSpaceResponse',
'ApplicationGatewayAuthenticationCertificateResponse',
'ApplicationGatewayAutoscaleConfigurationResponse',
'ApplicationGatewayBackendAddressPoolResponse',
'ApplicationGatewayBackendAddressResponse',
'ApplicationGatewayBackendHealthHttpSettingsResponseResult',
'ApplicationGatewayBackendHealthServerResponseResult',
'ApplicationGatewayBackendHttpSettingsResponse',
'ApplicationGatewayConnectionDrainingResponse',
'ApplicationGatewayCustomErrorResponse',
'ApplicationGatewayFirewallDisabledRuleGroupResponse',
'ApplicationGatewayFirewallExclusionResponse',
'ApplicationGatewayFrontendIPConfigurationResponse',
'ApplicationGatewayFrontendPortResponse',
'ApplicationGatewayHeaderConfigurationResponse',
'ApplicationGatewayHttpListenerResponse',
'ApplicationGatewayIPConfigurationResponse',
'ApplicationGatewayPathRuleResponse',
'ApplicationGatewayProbeHealthResponseMatchResponse',
'ApplicationGatewayProbeResponse',
'ApplicationGatewayRedirectConfigurationResponse',
'ApplicationGatewayRequestRoutingRuleResponse',
'ApplicationGatewayResponse',
'ApplicationGatewayRewriteRuleActionSetResponse',
'ApplicationGatewayRewriteRuleConditionResponse',
'ApplicationGatewayRewriteRuleResponse',
'ApplicationGatewayRewriteRuleSetResponse',
'ApplicationGatewaySkuResponse',
'ApplicationGatewaySslCertificateResponse',
'ApplicationGatewaySslPolicyResponse',
'ApplicationGatewayTrustedRootCertificateResponse',
'ApplicationGatewayUrlPathMapResponse',
'ApplicationGatewayWebApplicationFirewallConfigurationResponse',
'ApplicationSecurityGroupResponse',
'AzureFirewallApplicationRuleCollectionResponse',
'AzureFirewallApplicationRuleProtocolResponse',
'AzureFirewallApplicationRuleResponse',
'AzureFirewallIPConfigurationResponse',
'AzureFirewallNatRCActionResponse',
'AzureFirewallNatRuleCollectionResponse',
'AzureFirewallNatRuleResponse',
'AzureFirewallNetworkRuleCollectionResponse',
'AzureFirewallNetworkRuleResponse',
'AzureFirewallRCActionResponse',
'BackendAddressPoolResponse',
'BackendPoolResponse',
'BackendPoolsSettingsResponse',
'BackendResponse',
'BastionHostIPConfigurationResponse',
'BgpPeerStatusResponseResult',
'BgpSettingsResponse',
'CacheConfigurationResponse',
'ConnectionMonitorDestinationResponse',
'ConnectionMonitorSourceResponse',
'ContainerNetworkInterfaceConfigurationResponse',
'ContainerNetworkInterfaceIpConfigurationResponse',
'ContainerNetworkInterfaceResponse',
'ContainerResponse',
'CustomHttpsConfigurationResponse',
'DdosSettingsResponse',
'DelegationResponse',
'DevicePropertiesResponse',
'DhcpOptionsResponse',
'ExpressRouteCircuitAuthorizationResponse',
'ExpressRouteCircuitConnectionResponse',
'ExpressRouteCircuitPeeringConfigResponse',
'ExpressRouteCircuitPeeringIdResponse',
'ExpressRouteCircuitPeeringResponse',
'ExpressRouteCircuitServiceProviderPropertiesResponse',
'ExpressRouteCircuitSkuResponse',
'ExpressRouteCircuitStatsResponse',
'ExpressRouteConnectionIdResponse',
'ExpressRouteConnectionResponse',
'ExpressRouteGatewayPropertiesResponseAutoScaleConfiguration',
'ExpressRouteGatewayPropertiesResponseBounds',
'ExpressRouteLinkResponse',
'ForwardingConfigurationResponse',
'FrontendEndpointResponse',
'FrontendEndpointUpdateParametersResponseWebApplicationFirewallPolicyLink',
'FrontendIPConfigurationResponse',
'GatewayRouteResponseResult',
'HealthProbeSettingsModelResponse',
'HubVirtualNetworkConnectionResponse',
'IPConfigurationProfileResponse',
'IPConfigurationResponse',
'InboundNatPoolResponse',
'InboundNatRuleResponse',
'IpTagResponse',
'IpsecPolicyResponse',
'Ipv6ExpressRouteCircuitPeeringConfigResponse',
'KeyVaultCertificateSourceParametersResponseVault',
'LoadBalancerSkuResponse',
'LoadBalancingRuleResponse',
'LoadBalancingSettingsModelResponse',
'LocalNetworkGatewayResponse',
'ManagedServiceIdentityResponse',
'ManagedServiceIdentityResponseUserAssignedIdentities',
'MatchConditionResponse',
'MatchVariableResponse',
'NatGatewaySkuResponse',
'NetworkInterfaceDnsSettingsResponse',
'NetworkInterfaceIPConfigurationResponse',
'NetworkInterfaceResponse',
'NetworkInterfaceTapConfigurationResponse',
'NetworkSecurityGroupResponse',
'OutboundRuleResponse',
'P2SVpnServerConfigRadiusClientRootCertificateResponse',
'P2SVpnServerConfigRadiusServerRootCertificateResponse',
'P2SVpnServerConfigVpnClientRevokedCertificateResponse',
'P2SVpnServerConfigVpnClientRootCertificateResponse',
'P2SVpnServerConfigurationResponse',
'PacketCaptureFilterResponse',
'PacketCaptureStorageLocationResponse',
'PeerExpressRouteCircuitConnectionResponse',
'PolicySettingsResponse',
'PrivateEndpointConnectionResponse',
'PrivateEndpointResponse',
'PrivateLinkServiceConnectionResponse',
'PrivateLinkServiceConnectionStateResponse',
'PrivateLinkServiceIpConfigurationResponse',
'PrivateLinkServicePropertiesResponseAutoApproval',
'PrivateLinkServicePropertiesResponseVisibility',
'ProbeResponse',
'ProtocolCustomSettingsFormatResponse',
'PublicIPAddressDnsSettingsResponse',
'PublicIPAddressResponse',
'PublicIPAddressSkuResponse',
'PublicIPPrefixSkuResponse',
'RedirectConfigurationResponse',
'ReferencedPublicIpAddressResponse',
'ResourceNavigationLinkResponse',
'RouteFilterRuleResponse',
'RouteResponse',
'RouteTableResponse',
'RoutingRuleResponse',
'SecurityRuleResponse',
'ServiceAssociationLinkResponse',
'ServiceEndpointPolicyDefinitionResponse',
'ServiceEndpointPolicyResponse',
'ServiceEndpointPropertiesFormatResponse',
'SubResourceResponse',
'SubnetResponse',
'TunnelConnectionHealthResponse',
'VirtualHubIdResponse',
'VirtualHubRouteResponse',
'VirtualHubRouteTableResponse',
'VirtualNetworkGatewayIPConfigurationResponse',
'VirtualNetworkGatewayResponse',
'VirtualNetworkGatewaySkuResponse',
'VirtualNetworkPeeringResponse',
'VirtualNetworkTapResponse',
'VpnClientConfigurationResponse',
'VpnClientConnectionHealthDetailResponseResult',
'VpnClientConnectionHealthResponse',
'VpnClientRevokedCertificateResponse',
'VpnClientRootCertificateResponse',
'VpnConnectionResponse',
'WebApplicationFirewallCustomRuleResponse',
]
# NOTE: this class is machine-generated (see the file header); edits here
# will be lost on regeneration.
@pulumi.output_type
class AddressSpaceResponse(dict):
    """
    AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network.
    """
    def __init__(__self__, *,
                 address_prefixes: Optional[Sequence[str]] = None):
        """
        AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network.
        :param Sequence[str] address_prefixes: A list of address blocks reserved for this virtual network in CIDR notation.
        """
        if address_prefixes is not None:
            pulumi.set(__self__, "address_prefixes", address_prefixes)
    @property
    @pulumi.getter(name="addressPrefixes")
    def address_prefixes(self) -> Optional[Sequence[str]]:
        """
        A list of address blocks reserved for this virtual network in CIDR notation.
        """
        return pulumi.get(self, "address_prefixes")
    def _translate_property(self, prop):
        # Map camelCase wire-format property names to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE: this class is machine-generated (see the file header); edits here
# will be lost on regeneration.
@pulumi.output_type
class ApplicationGatewayAuthenticationCertificateResponse(dict):
    """
    Authentication certificates of an application gateway.
    """
    def __init__(__self__, *,
                 data: Optional[str] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 type: Optional[str] = None):
        """
        Authentication certificates of an application gateway.
        :param str data: Certificate public data.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: Name of the authentication certificate that is unique within an Application Gateway.
        :param str provisioning_state: Provisioning state of the authentication certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str type: Type of the resource.
        """
        if data is not None:
            pulumi.set(__self__, "data", data)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def data(self) -> Optional[str]:
        """
        Certificate public data.
        """
        return pulumi.get(self, "data")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the authentication certificate that is unique within an Application Gateway.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        Provisioning state of the authentication certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Type of the resource.
        """
        return pulumi.get(self, "type")
    def _translate_property(self, prop):
        # Map camelCase wire-format property names to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE: this class is machine-generated (see the file header); edits here
# will be lost on regeneration.
@pulumi.output_type
class ApplicationGatewayAutoscaleConfigurationResponse(dict):
    """
    Application Gateway autoscale configuration.
    """
    def __init__(__self__, *,
                 min_capacity: int,
                 max_capacity: Optional[int] = None):
        """
        Application Gateway autoscale configuration.
        :param int min_capacity: Lower bound on number of Application Gateway capacity.
        :param int max_capacity: Upper bound on number of Application Gateway capacity.
        """
        # min_capacity is the only required field of this output type
        pulumi.set(__self__, "min_capacity", min_capacity)
        if max_capacity is not None:
            pulumi.set(__self__, "max_capacity", max_capacity)
    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> int:
        """
        Lower bound on number of Application Gateway capacity.
        """
        return pulumi.get(self, "min_capacity")
    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> Optional[int]:
        """
        Upper bound on number of Application Gateway capacity.
        """
        return pulumi.get(self, "max_capacity")
    def _translate_property(self, prop):
        # Map camelCase wire-format property names to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE: this class is machine-generated (see the file header); edits here
# will be lost on regeneration.
@pulumi.output_type
class ApplicationGatewayBackendAddressPoolResponse(dict):
    """
    Backend Address Pool of an application gateway.
    """
    def __init__(__self__, *,
                 backend_addresses: Optional[Sequence['outputs.ApplicationGatewayBackendAddressResponse']] = None,
                 backend_ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 type: Optional[str] = None):
        """
        Backend Address Pool of an application gateway.
        :param Sequence['ApplicationGatewayBackendAddressResponseArgs'] backend_addresses: Backend addresses.
        :param Sequence['NetworkInterfaceIPConfigurationResponseArgs'] backend_ip_configurations: Collection of references to IPs defined in network interfaces.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: Name of the backend address pool that is unique within an Application Gateway.
        :param str provisioning_state: Provisioning state of the backend address pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str type: Type of the resource.
        """
        if backend_addresses is not None:
            pulumi.set(__self__, "backend_addresses", backend_addresses)
        if backend_ip_configurations is not None:
            pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="backendAddresses")
    def backend_addresses(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressResponse']]:
        """
        Backend addresses.
        """
        return pulumi.get(self, "backend_addresses")
    @property
    @pulumi.getter(name="backendIPConfigurations")
    def backend_ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
        """
        Collection of references to IPs defined in network interfaces.
        """
        return pulumi.get(self, "backend_ip_configurations")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the backend address pool that is unique within an Application Gateway.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        Provisioning state of the backend address pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Type of the resource.
        """
        return pulumi.get(self, "type")
    def _translate_property(self, prop):
        # Map camelCase wire-format property names to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayBackendAddressResponse(dict):
    """
    A single backend address (FQDN or IP) of an application gateway.
    """
    def __init__(__self__, *,
                 fqdn: Optional[str] = None,
                 ip_address: Optional[str] = None):
        """
        A single backend address (FQDN or IP) of an application gateway.
        :param str fqdn: Fully qualified domain name (FQDN) of the backend.
        :param str ip_address: IP address of the backend.
        """
        # Persist only the fields that were explicitly supplied.
        for key, value in (("fqdn", fqdn), ("ip_address", ip_address)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter
    def fqdn(self) -> Optional[str]:
        """Fully qualified domain name (FQDN) of the backend."""
        return pulumi.get(self, "fqdn")
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        """IP address of the backend."""
        return pulumi.get(self, "ip_address")
    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute name."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ApplicationGatewayBackendHealthHttpSettingsResponseResult(dict):
    """
    Application gateway BackendHealthHttp settings.
    """
    def __init__(__self__, *,
                 backend_http_settings: Optional['outputs.ApplicationGatewayBackendHttpSettingsResponse'] = None,
                 servers: Optional[Sequence['outputs.ApplicationGatewayBackendHealthServerResponseResult']] = None):
        """
        Application gateway BackendHealthHttp settings.
        :param 'ApplicationGatewayBackendHttpSettingsResponseArgs' backend_http_settings: Reference to an ApplicationGatewayBackendHttpSettings resource.
        :param Sequence['ApplicationGatewayBackendHealthServerResponseArgs'] servers: List of ApplicationGatewayBackendHealthServer resources.
        """
        # Persist only the fields that were explicitly supplied.
        for key, value in (
                ("backend_http_settings", backend_http_settings),
                ("servers", servers)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="backendHttpSettings")
    def backend_http_settings(self) -> Optional['outputs.ApplicationGatewayBackendHttpSettingsResponse']:
        """Reference to an ApplicationGatewayBackendHttpSettings resource."""
        return pulumi.get(self, "backend_http_settings")
    @property
    @pulumi.getter
    def servers(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendHealthServerResponseResult']]:
        """List of ApplicationGatewayBackendHealthServer resources."""
        return pulumi.get(self, "servers")
@pulumi.output_type
class ApplicationGatewayBackendHealthServerResponseResult(dict):
    """
    Application gateway backend-health HTTP settings for a single server.
    """
    def __init__(__self__, *,
                 address: Optional[str] = None,
                 health: Optional[str] = None,
                 health_probe_log: Optional[str] = None,
                 ip_configuration: Optional['outputs.NetworkInterfaceIPConfigurationResponse'] = None):
        """
        Application gateway backend-health HTTP settings for a single server.
        :param str address: IP address or FQDN of the backend server.
        :param str health: Health of the backend server.
        :param str health_probe_log: Health probe log.
        :param 'NetworkInterfaceIPConfigurationResponseArgs' ip_configuration: Reference to the backend server's IP configuration.
        """
        # Persist only the fields that were explicitly supplied.
        for key, value in (
                ("address", address),
                ("health", health),
                ("health_probe_log", health_probe_log),
                ("ip_configuration", ip_configuration)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter
    def address(self) -> Optional[str]:
        """IP address or FQDN of the backend server."""
        return pulumi.get(self, "address")
    @property
    @pulumi.getter
    def health(self) -> Optional[str]:
        """Health of the backend server."""
        return pulumi.get(self, "health")
    @property
    @pulumi.getter(name="healthProbeLog")
    def health_probe_log(self) -> Optional[str]:
        """Health probe log."""
        return pulumi.get(self, "health_probe_log")
    @property
    @pulumi.getter(name="ipConfiguration")
    def ip_configuration(self) -> Optional['outputs.NetworkInterfaceIPConfigurationResponse']:
        """Reference to the backend server's IP configuration."""
        return pulumi.get(self, "ip_configuration")
@pulumi.output_type
class ApplicationGatewayBackendHttpSettingsResponse(dict):
    """
    Backend address pool settings of an application gateway.
    """
    def __init__(__self__, *,
                 affinity_cookie_name: Optional[str] = None,
                 authentication_certificates: Optional[Sequence['outputs.SubResourceResponse']] = None,
                 connection_draining: Optional['outputs.ApplicationGatewayConnectionDrainingResponse'] = None,
                 cookie_based_affinity: Optional[str] = None,
                 etag: Optional[str] = None,
                 host_name: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 path: Optional[str] = None,
                 pick_host_name_from_backend_address: Optional[bool] = None,
                 port: Optional[int] = None,
                 probe: Optional['outputs.SubResourceResponse'] = None,
                 probe_enabled: Optional[bool] = None,
                 protocol: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 request_timeout: Optional[int] = None,
                 trusted_root_certificates: Optional[Sequence['outputs.SubResourceResponse']] = None,
                 type: Optional[str] = None):
        """
        Backend address pool settings of an application gateway.
        :param str affinity_cookie_name: Cookie name used for the affinity cookie.
        :param Sequence['SubResourceResponseArgs'] authentication_certificates: References to application gateway authentication certificates.
        :param 'ApplicationGatewayConnectionDrainingResponseArgs' connection_draining: Connection draining of the backend http settings resource.
        :param str cookie_based_affinity: Cookie based affinity.
        :param str etag: Unique read-only string that changes whenever the resource is updated.
        :param str host_name: Host header sent to the backend servers.
        :param str id: Resource ID.
        :param str name: Name of the backend http settings, unique within an Application Gateway.
        :param str path: Prefix path for all HTTP requests; null means no prefix (default null).
        :param bool pick_host_name_from_backend_address: Whether the host header is taken from the backend server's host name (default false).
        :param int port: Destination port on the backend.
        :param 'SubResourceResponseArgs' probe: Probe resource of an application gateway.
        :param bool probe_enabled: Whether the probe is enabled (default false).
        :param str protocol: Protocol used to communicate with the backend.
        :param str provisioning_state: Provisioning state: 'Updating', 'Deleting', or 'Failed'.
        :param int request_timeout: Request timeout in seconds (1 to 86400); the request fails if no response arrives in time.
        :param Sequence['SubResourceResponseArgs'] trusted_root_certificates: References to application gateway trusted root certificates.
        :param str type: Type of the resource.
        """
        # Persist only the fields that were explicitly supplied, in declaration order.
        for key, value in (
                ("affinity_cookie_name", affinity_cookie_name),
                ("authentication_certificates", authentication_certificates),
                ("connection_draining", connection_draining),
                ("cookie_based_affinity", cookie_based_affinity),
                ("etag", etag),
                ("host_name", host_name),
                ("id", id),
                ("name", name),
                ("path", path),
                ("pick_host_name_from_backend_address", pick_host_name_from_backend_address),
                ("port", port),
                ("probe", probe),
                ("probe_enabled", probe_enabled),
                ("protocol", protocol),
                ("provisioning_state", provisioning_state),
                ("request_timeout", request_timeout),
                ("trusted_root_certificates", trusted_root_certificates),
                ("type", type)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="affinityCookieName")
    def affinity_cookie_name(self) -> Optional[str]:
        """Cookie name used for the affinity cookie."""
        return pulumi.get(self, "affinity_cookie_name")
    @property
    @pulumi.getter(name="authenticationCertificates")
    def authentication_certificates(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
        """References to application gateway authentication certificates."""
        return pulumi.get(self, "authentication_certificates")
    @property
    @pulumi.getter(name="connectionDraining")
    def connection_draining(self) -> Optional['outputs.ApplicationGatewayConnectionDrainingResponse']:
        """Connection draining of the backend http settings resource."""
        return pulumi.get(self, "connection_draining")
    @property
    @pulumi.getter(name="cookieBasedAffinity")
    def cookie_based_affinity(self) -> Optional[str]:
        """Cookie based affinity."""
        return pulumi.get(self, "cookie_based_affinity")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> Optional[str]:
        """Host header sent to the backend servers."""
        return pulumi.get(self, "host_name")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the backend http settings, unique within an Application Gateway."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def path(self) -> Optional[str]:
        """Prefix path for all HTTP requests; null means no prefix (default null)."""
        return pulumi.get(self, "path")
    @property
    @pulumi.getter(name="pickHostNameFromBackendAddress")
    def pick_host_name_from_backend_address(self) -> Optional[bool]:
        """Whether the host header is taken from the backend server's host name (default false)."""
        return pulumi.get(self, "pick_host_name_from_backend_address")
    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        """Destination port on the backend."""
        return pulumi.get(self, "port")
    @property
    @pulumi.getter
    def probe(self) -> Optional['outputs.SubResourceResponse']:
        """Probe resource of an application gateway."""
        return pulumi.get(self, "probe")
    @property
    @pulumi.getter(name="probeEnabled")
    def probe_enabled(self) -> Optional[bool]:
        """Whether the probe is enabled (default false)."""
        return pulumi.get(self, "probe_enabled")
    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """Protocol used to communicate with the backend."""
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state: 'Updating', 'Deleting', or 'Failed'."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="requestTimeout")
    def request_timeout(self) -> Optional[int]:
        """Request timeout in seconds (1 to 86400); the request fails if no response arrives in time."""
        return pulumi.get(self, "request_timeout")
    @property
    @pulumi.getter(name="trustedRootCertificates")
    def trusted_root_certificates(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
        """References to application gateway trusted root certificates."""
        return pulumi.get(self, "trusted_root_certificates")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")
    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute name."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ApplicationGatewayConnectionDrainingResponse(dict):
    """
    Connection draining keeps open connections to a backend server active for a
    specified time after the server is removed from the configuration.
    """
    def __init__(__self__, *,
                 drain_timeout_in_sec: int,
                 enabled: bool):
        """
        :param int drain_timeout_in_sec: Seconds connection draining stays active (1 to 3600).
        :param bool enabled: Whether connection draining is enabled.
        """
        # Both fields are required, so they are always stored.
        for key, value in (("drain_timeout_in_sec", drain_timeout_in_sec), ("enabled", enabled)):
            pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="drainTimeoutInSec")
    def drain_timeout_in_sec(self) -> int:
        """Seconds connection draining stays active (1 to 3600)."""
        return pulumi.get(self, "drain_timeout_in_sec")
    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """Whether connection draining is enabled."""
        return pulumi.get(self, "enabled")
    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute name."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ApplicationGatewayCustomErrorResponse(dict):
    """
    Custom error of an application gateway.
    """
    def __init__(__self__, *,
                 custom_error_page_url: Optional[str] = None,
                 status_code: Optional[str] = None):
        """
        Custom error of an application gateway.
        :param str custom_error_page_url: Error page URL of the application gateway custom error.
        :param str status_code: Status code of the application gateway custom error.
        """
        # Persist only the fields that were explicitly supplied.
        for key, value in (
                ("custom_error_page_url", custom_error_page_url),
                ("status_code", status_code)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="customErrorPageUrl")
    def custom_error_page_url(self) -> Optional[str]:
        """Error page URL of the application gateway custom error."""
        return pulumi.get(self, "custom_error_page_url")
    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> Optional[str]:
        """Status code of the application gateway custom error."""
        return pulumi.get(self, "status_code")
    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute name."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ApplicationGatewayFirewallDisabledRuleGroupResponse(dict):
    """
    Disables specific rules within a rule group, or an entire rule group.
    """
    def __init__(__self__, *,
                 rule_group_name: str,
                 rules: Optional[Sequence[int]] = None):
        """
        :param str rule_group_name: Name of the rule group to disable.
        :param Sequence[int] rules: Rules to disable; if null, the whole rule group is disabled.
        """
        # rule_group_name is required; rules is only stored when supplied.
        values = {"rule_group_name": rule_group_name}
        if rules is not None:
            values["rules"] = rules
        for key, value in values.items():
            pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="ruleGroupName")
    def rule_group_name(self) -> str:
        """Name of the rule group to disable."""
        return pulumi.get(self, "rule_group_name")
    @property
    @pulumi.getter
    def rules(self) -> Optional[Sequence[int]]:
        """Rules to disable; if null, the whole rule group is disabled."""
        return pulumi.get(self, "rules")
    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute name."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ApplicationGatewayFirewallExclusionResponse(dict):
    """
    Excludes a variable satisfying the given condition from the WAF check.
    """
    def __init__(__self__, *,
                 match_variable: str,
                 selector: str,
                 selector_match_operator: str):
        """
        :param str match_variable: The variable to exclude.
        :param str selector: When matchVariable is a collection, the operator used to pick the elements this exclusion applies to.
        :param str selector_match_operator: When matchVariable is a collection, how the selector is applied to pick the elements this exclusion applies to.
        """
        # All three fields are required, so they are always stored.
        for key, value in (
                ("match_variable", match_variable),
                ("selector", selector),
                ("selector_match_operator", selector_match_operator)):
            pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="matchVariable")
    def match_variable(self) -> str:
        """The variable to exclude."""
        return pulumi.get(self, "match_variable")
    @property
    @pulumi.getter
    def selector(self) -> str:
        """When matchVariable is a collection, the operator used to pick the elements this exclusion applies to."""
        return pulumi.get(self, "selector")
    @property
    @pulumi.getter(name="selectorMatchOperator")
    def selector_match_operator(self) -> str:
        """When matchVariable is a collection, how the selector is applied to pick the elements this exclusion applies to."""
        return pulumi.get(self, "selector_match_operator")
    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute name."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ApplicationGatewayFrontendIPConfigurationResponse(dict):
    """
    Frontend IP configuration of an application gateway.
    """
    def __init__(__self__, *,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 private_ip_address: Optional[str] = None,
                 private_ip_allocation_method: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 public_ip_address: Optional['outputs.SubResourceResponse'] = None,
                 subnet: Optional['outputs.SubResourceResponse'] = None,
                 type: Optional[str] = None):
        """
        Frontend IP configuration of an application gateway.
        :param str etag: Unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: Name of the frontend IP configuration, unique within an Application Gateway.
        :param str private_ip_address: Private IP address of the network interface IP configuration.
        :param str private_ip_allocation_method: The private IP address allocation method.
        :param str provisioning_state: Provisioning state of the public IP resource: 'Updating', 'Deleting', or 'Failed'.
        :param 'SubResourceResponseArgs' public_ip_address: Reference to the PublicIP resource.
        :param 'SubResourceResponseArgs' subnet: Reference to the subnet resource.
        :param str type: Type of the resource.
        """
        # Persist only the fields that were explicitly supplied, in declaration order.
        for key, value in (
                ("etag", etag),
                ("id", id),
                ("name", name),
                ("private_ip_address", private_ip_address),
                ("private_ip_allocation_method", private_ip_allocation_method),
                ("provisioning_state", provisioning_state),
                ("public_ip_address", public_ip_address),
                ("subnet", subnet),
                ("type", type)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the frontend IP configuration, unique within an Application Gateway."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateIPAddress")
    def private_ip_address(self) -> Optional[str]:
        """Private IP address of the network interface IP configuration."""
        return pulumi.get(self, "private_ip_address")
    @property
    @pulumi.getter(name="privateIPAllocationMethod")
    def private_ip_allocation_method(self) -> Optional[str]:
        """The private IP address allocation method."""
        return pulumi.get(self, "private_ip_allocation_method")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state of the public IP resource: 'Updating', 'Deleting', or 'Failed'."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicIPAddress")
    def public_ip_address(self) -> Optional['outputs.SubResourceResponse']:
        """Reference to the PublicIP resource."""
        return pulumi.get(self, "public_ip_address")
    @property
    @pulumi.getter
    def subnet(self) -> Optional['outputs.SubResourceResponse']:
        """Reference to the subnet resource."""
        return pulumi.get(self, "subnet")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")
    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute name."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ApplicationGatewayFrontendPortResponse(dict):
    """
    Frontend port of an application gateway.
    """
    def __init__(__self__, *,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 port: Optional[int] = None,
                 provisioning_state: Optional[str] = None,
                 type: Optional[str] = None):
        """
        Frontend port of an application gateway.
        :param str etag: Unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: Name of the frontend port, unique within an Application Gateway.
        :param int port: Frontend port.
        :param str provisioning_state: Provisioning state: 'Updating', 'Deleting', or 'Failed'.
        :param str type: Type of the resource.
        """
        # Persist only the fields that were explicitly supplied.
        for key, value in (
                ("etag", etag),
                ("id", id),
                ("name", name),
                ("port", port),
                ("provisioning_state", provisioning_state),
                ("type", type)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the frontend port, unique within an Application Gateway."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        """Frontend port."""
        return pulumi.get(self, "port")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state: 'Updating', 'Deleting', or 'Failed'."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")
    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute name."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ApplicationGatewayHeaderConfigurationResponse(dict):
    """
    Header configuration of the Actions set in Application Gateway.
    """
    def __init__(__self__, *,
                 header_name: Optional[str] = None,
                 header_value: Optional[str] = None):
        """
        Header configuration of the Actions set in Application Gateway.
        :param str header_name: Header name of the header configuration.
        :param str header_value: Header value of the header configuration.
        """
        # Persist only the fields that were explicitly supplied.
        for key, value in (("header_name", header_name), ("header_value", header_value)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="headerName")
    def header_name(self) -> Optional[str]:
        """Header name of the header configuration."""
        return pulumi.get(self, "header_name")
    @property
    @pulumi.getter(name="headerValue")
    def header_value(self) -> Optional[str]:
        """Header value of the header configuration."""
        return pulumi.get(self, "header_value")
    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute name."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ApplicationGatewayHttpListenerResponse(dict):
    """
    HTTP listener of an application gateway.
    """
    def __init__(__self__, *,
                 custom_error_configurations: Optional[Sequence['outputs.ApplicationGatewayCustomErrorResponse']] = None,
                 etag: Optional[str] = None,
                 frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None,
                 frontend_port: Optional['outputs.SubResourceResponse'] = None,
                 host_name: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 protocol: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 require_server_name_indication: Optional[bool] = None,
                 ssl_certificate: Optional['outputs.SubResourceResponse'] = None,
                 type: Optional[str] = None):
        """
        HTTP listener of an application gateway.
        :param Sequence['ApplicationGatewayCustomErrorResponseArgs'] custom_error_configurations: Custom error configurations of the HTTP listener.
        :param str etag: Unique read-only string that changes whenever the resource is updated.
        :param 'SubResourceResponseArgs' frontend_ip_configuration: Frontend IP configuration resource of an application gateway.
        :param 'SubResourceResponseArgs' frontend_port: Frontend port resource of an application gateway.
        :param str host_name: Host name of the HTTP listener.
        :param str id: Resource ID.
        :param str name: Name of the HTTP listener, unique within an Application Gateway.
        :param str protocol: Protocol of the HTTP listener.
        :param str provisioning_state: Provisioning state: 'Updating', 'Deleting', or 'Failed'.
        :param bool require_server_name_indication: Enables SNI for multi-hosting; applicable only if protocol is https.
        :param 'SubResourceResponseArgs' ssl_certificate: SSL certificate resource of an application gateway.
        :param str type: Type of the resource.
        """
        # Persist only the fields that were explicitly supplied, in declaration order.
        for key, value in (
                ("custom_error_configurations", custom_error_configurations),
                ("etag", etag),
                ("frontend_ip_configuration", frontend_ip_configuration),
                ("frontend_port", frontend_port),
                ("host_name", host_name),
                ("id", id),
                ("name", name),
                ("protocol", protocol),
                ("provisioning_state", provisioning_state),
                ("require_server_name_indication", require_server_name_indication),
                ("ssl_certificate", ssl_certificate),
                ("type", type)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="customErrorConfigurations")
    def custom_error_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayCustomErrorResponse']]:
        """Custom error configurations of the HTTP listener."""
        return pulumi.get(self, "custom_error_configurations")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="frontendIPConfiguration")
    def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']:
        """Frontend IP configuration resource of an application gateway."""
        return pulumi.get(self, "frontend_ip_configuration")
    @property
    @pulumi.getter(name="frontendPort")
    def frontend_port(self) -> Optional['outputs.SubResourceResponse']:
        """Frontend port resource of an application gateway."""
        return pulumi.get(self, "frontend_port")
    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> Optional[str]:
        """Host name of the HTTP listener."""
        return pulumi.get(self, "host_name")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the HTTP listener, unique within an Application Gateway."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """Protocol of the HTTP listener."""
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state: 'Updating', 'Deleting', or 'Failed'."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="requireServerNameIndication")
    def require_server_name_indication(self) -> Optional[bool]:
        """Enables SNI for multi-hosting; applicable only if protocol is https."""
        return pulumi.get(self, "require_server_name_indication")
    @property
    @pulumi.getter(name="sslCertificate")
    def ssl_certificate(self) -> Optional['outputs.SubResourceResponse']:
        """SSL certificate resource of an application gateway."""
        return pulumi.get(self, "ssl_certificate")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")
    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute name."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ApplicationGatewayIPConfigurationResponse(dict):
    """
    IP configuration of an application gateway. Currently 1 public and 1 private IP configuration is allowed.
    """
    def __init__(__self__, *,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 subnet: Optional['outputs.SubResourceResponse'] = None,
                 type: Optional[str] = None):
        """
        IP configuration of an application gateway. Currently 1 public and 1 private IP configuration is allowed.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: Name of the IP configuration that is unique within an Application Gateway.
        :param str provisioning_state: Provisioning state of the application gateway subnet resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param 'SubResourceResponseArgs' subnet: Reference of the subnet resource. A subnet from where application gateway gets its private address.
        :param str type: Type of the resource.
        """
        # Record only properties the caller actually supplied; omitted (None)
        # values stay unset so they do not appear in the serialized output.
        provided = (
            ("etag", etag),
            ("id", id),
            ("name", name),
            ("provisioning_state", provisioning_state),
            ("subnet", subnet),
            ("type", type),
        )
        for prop_name, prop_value in provided:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the IP configuration that is unique within an Application Gateway."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state of the application gateway subnet resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def subnet(self) -> Optional['outputs.SubResourceResponse']:
        """Reference of the subnet resource. A subnet from where application gateway gets its private address."""
        return pulumi.get(self, "subnet")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case attribute name."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayPathRuleResponse(dict):
    """
    Path rule of URL path map of an application gateway.
    """
    def __init__(__self__, *,
                 backend_address_pool: Optional['outputs.SubResourceResponse'] = None,
                 backend_http_settings: Optional['outputs.SubResourceResponse'] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 paths: Optional[Sequence[str]] = None,
                 provisioning_state: Optional[str] = None,
                 redirect_configuration: Optional['outputs.SubResourceResponse'] = None,
                 rewrite_rule_set: Optional['outputs.SubResourceResponse'] = None,
                 type: Optional[str] = None):
        """
        Path rule of URL path map of an application gateway.
        :param 'SubResourceResponseArgs' backend_address_pool: Backend address pool resource of URL path map path rule.
        :param 'SubResourceResponseArgs' backend_http_settings: Backend http settings resource of URL path map path rule.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: Name of the path rule that is unique within an Application Gateway.
        :param Sequence[str] paths: Path rules of URL path map.
        :param str provisioning_state: Provisioning state of the path rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param 'SubResourceResponseArgs' redirect_configuration: Redirect configuration resource of URL path map path rule.
        :param 'SubResourceResponseArgs' rewrite_rule_set: Rewrite rule set resource of URL path map path rule.
        :param str type: Type of the resource.
        """
        # Record only properties the caller actually supplied; omitted (None)
        # values stay unset so they do not appear in the serialized output.
        provided = (
            ("backend_address_pool", backend_address_pool),
            ("backend_http_settings", backend_http_settings),
            ("etag", etag),
            ("id", id),
            ("name", name),
            ("paths", paths),
            ("provisioning_state", provisioning_state),
            ("redirect_configuration", redirect_configuration),
            ("rewrite_rule_set", rewrite_rule_set),
            ("type", type),
        )
        for prop_name, prop_value in provided:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="backendAddressPool")
    def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']:
        """Backend address pool resource of URL path map path rule."""
        return pulumi.get(self, "backend_address_pool")

    @property
    @pulumi.getter(name="backendHttpSettings")
    def backend_http_settings(self) -> Optional['outputs.SubResourceResponse']:
        """Backend http settings resource of URL path map path rule."""
        return pulumi.get(self, "backend_http_settings")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the path rule that is unique within an Application Gateway."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def paths(self) -> Optional[Sequence[str]]:
        """Path rules of URL path map."""
        return pulumi.get(self, "paths")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state of the path rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="redirectConfiguration")
    def redirect_configuration(self) -> Optional['outputs.SubResourceResponse']:
        """Redirect configuration resource of URL path map path rule."""
        return pulumi.get(self, "redirect_configuration")

    @property
    @pulumi.getter(name="rewriteRuleSet")
    def rewrite_rule_set(self) -> Optional['outputs.SubResourceResponse']:
        """Rewrite rule set resource of URL path map path rule."""
        return pulumi.get(self, "rewrite_rule_set")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case attribute name."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayProbeHealthResponseMatchResponse(dict):
    """
    Application gateway probe health response match.
    """
    def __init__(__self__, *,
                 body: Optional[str] = None,
                 status_codes: Optional[Sequence[str]] = None):
        """
        Application gateway probe health response match.
        :param str body: Body that must be contained in the health response. Default value is empty.
        :param Sequence[str] status_codes: Allowed ranges of healthy status codes. Default range of healthy status codes is 200-399.
        """
        # Record only properties the caller actually supplied; omitted (None)
        # values stay unset so they do not appear in the serialized output.
        for prop_name, prop_value in (("body", body), ("status_codes", status_codes)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def body(self) -> Optional[str]:
        """Body that must be contained in the health response. Default value is empty."""
        return pulumi.get(self, "body")

    @property
    @pulumi.getter(name="statusCodes")
    def status_codes(self) -> Optional[Sequence[str]]:
        """Allowed ranges of healthy status codes. Default range of healthy status codes is 200-399."""
        return pulumi.get(self, "status_codes")

    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case attribute name."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayProbeResponse(dict):
    """
    Probe of the application gateway.
    """
    def __init__(__self__, *,
                 etag: Optional[str] = None,
                 host: Optional[str] = None,
                 id: Optional[str] = None,
                 interval: Optional[int] = None,
                 match: Optional['outputs.ApplicationGatewayProbeHealthResponseMatchResponse'] = None,
                 min_servers: Optional[int] = None,
                 name: Optional[str] = None,
                 path: Optional[str] = None,
                 pick_host_name_from_backend_http_settings: Optional[bool] = None,
                 port: Optional[int] = None,
                 protocol: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 timeout: Optional[int] = None,
                 type: Optional[str] = None,
                 unhealthy_threshold: Optional[int] = None):
        """
        Probe of the application gateway.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str host: Host name to send the probe to.
        :param str id: Resource ID.
        :param int interval: The probing interval in seconds. This is the time interval between two consecutive probes. Acceptable values are from 1 second to 86400 seconds.
        :param 'ApplicationGatewayProbeHealthResponseMatchResponseArgs' match: Criterion for classifying a healthy probe response.
        :param int min_servers: Minimum number of servers that are always marked healthy. Default value is 0.
        :param str name: Name of the probe that is unique within an Application Gateway.
        :param str path: Relative path of probe. Valid path starts from '/'. Probe is sent to <Protocol>://<host>:<port><path>.
        :param bool pick_host_name_from_backend_http_settings: Whether the host header should be picked from the backend http settings. Default value is false.
        :param int port: Custom port which will be used for probing the backend servers. The valid value ranges from 1 to 65535. In case not set, port from http settings will be used. This property is valid for Standard_v2 and WAF_v2 only.
        :param str protocol: The protocol used for the probe.
        :param str provisioning_state: Provisioning state of the probe resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param int timeout: The probe timeout in seconds. Probe marked as failed if valid response is not received with this timeout period. Acceptable values are from 1 second to 86400 seconds.
        :param str type: Type of the resource.
        :param int unhealthy_threshold: The probe retry count. Backend server is marked down after consecutive probe failure count reaches UnhealthyThreshold. Acceptable values are from 1 to 20.
        """
        # Record only properties the caller actually supplied; omitted (None)
        # values stay unset so they do not appear in the serialized output.
        provided = (
            ("etag", etag),
            ("host", host),
            ("id", id),
            ("interval", interval),
            ("match", match),
            ("min_servers", min_servers),
            ("name", name),
            ("path", path),
            ("pick_host_name_from_backend_http_settings", pick_host_name_from_backend_http_settings),
            ("port", port),
            ("protocol", protocol),
            ("provisioning_state", provisioning_state),
            ("timeout", timeout),
            ("type", type),
            ("unhealthy_threshold", unhealthy_threshold),
        )
        for prop_name, prop_value in provided:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def host(self) -> Optional[str]:
        """Host name to send the probe to."""
        return pulumi.get(self, "host")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def interval(self) -> Optional[int]:
        """The probing interval in seconds. This is the time interval between two consecutive probes. Acceptable values are from 1 second to 86400 seconds."""
        return pulumi.get(self, "interval")

    @property
    @pulumi.getter
    def match(self) -> Optional['outputs.ApplicationGatewayProbeHealthResponseMatchResponse']:
        """Criterion for classifying a healthy probe response."""
        return pulumi.get(self, "match")

    @property
    @pulumi.getter(name="minServers")
    def min_servers(self) -> Optional[int]:
        """Minimum number of servers that are always marked healthy. Default value is 0."""
        return pulumi.get(self, "min_servers")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the probe that is unique within an Application Gateway."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def path(self) -> Optional[str]:
        """Relative path of probe. Valid path starts from '/'. Probe is sent to <Protocol>://<host>:<port><path>."""
        return pulumi.get(self, "path")

    @property
    @pulumi.getter(name="pickHostNameFromBackendHttpSettings")
    def pick_host_name_from_backend_http_settings(self) -> Optional[bool]:
        """Whether the host header should be picked from the backend http settings. Default value is false."""
        return pulumi.get(self, "pick_host_name_from_backend_http_settings")

    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        """Custom port which will be used for probing the backend servers. The valid value ranges from 1 to 65535. In case not set, port from http settings will be used. This property is valid for Standard_v2 and WAF_v2 only."""
        return pulumi.get(self, "port")

    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """The protocol used for the probe."""
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state of the probe resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def timeout(self) -> Optional[int]:
        """The probe timeout in seconds. Probe marked as failed if valid response is not received with this timeout period. Acceptable values are from 1 second to 86400 seconds."""
        return pulumi.get(self, "timeout")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="unhealthyThreshold")
    def unhealthy_threshold(self) -> Optional[int]:
        """The probe retry count. Backend server is marked down after consecutive probe failure count reaches UnhealthyThreshold. Acceptable values are from 1 to 20."""
        return pulumi.get(self, "unhealthy_threshold")

    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case attribute name."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayRedirectConfigurationResponse(dict):
    """
    Redirect configuration of an application gateway.
    """
    def __init__(__self__, *,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 include_path: Optional[bool] = None,
                 include_query_string: Optional[bool] = None,
                 name: Optional[str] = None,
                 path_rules: Optional[Sequence['outputs.SubResourceResponse']] = None,
                 redirect_type: Optional[str] = None,
                 request_routing_rules: Optional[Sequence['outputs.SubResourceResponse']] = None,
                 target_listener: Optional['outputs.SubResourceResponse'] = None,
                 target_url: Optional[str] = None,
                 type: Optional[str] = None,
                 url_path_maps: Optional[Sequence['outputs.SubResourceResponse']] = None):
        """
        Redirect configuration of an application gateway.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param bool include_path: Include path in the redirected url.
        :param bool include_query_string: Include query string in the redirected url.
        :param str name: Name of the redirect configuration that is unique within an Application Gateway.
        :param Sequence['SubResourceResponseArgs'] path_rules: Path rules specifying redirect configuration.
        :param str redirect_type: HTTP redirection type.
        :param Sequence['SubResourceResponseArgs'] request_routing_rules: Request routing specifying redirect configuration.
        :param 'SubResourceResponseArgs' target_listener: Reference to a listener to redirect the request to.
        :param str target_url: Url to redirect the request to.
        :param str type: Type of the resource.
        :param Sequence['SubResourceResponseArgs'] url_path_maps: Url path maps specifying default redirect configuration.
        """
        # Record only properties the caller actually supplied; omitted (None)
        # values stay unset so they do not appear in the serialized output.
        provided = (
            ("etag", etag),
            ("id", id),
            ("include_path", include_path),
            ("include_query_string", include_query_string),
            ("name", name),
            ("path_rules", path_rules),
            ("redirect_type", redirect_type),
            ("request_routing_rules", request_routing_rules),
            ("target_listener", target_listener),
            ("target_url", target_url),
            ("type", type),
            ("url_path_maps", url_path_maps),
        )
        for prop_name, prop_value in provided:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="includePath")
    def include_path(self) -> Optional[bool]:
        """Include path in the redirected url."""
        return pulumi.get(self, "include_path")

    @property
    @pulumi.getter(name="includeQueryString")
    def include_query_string(self) -> Optional[bool]:
        """Include query string in the redirected url."""
        return pulumi.get(self, "include_query_string")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the redirect configuration that is unique within an Application Gateway."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="pathRules")
    def path_rules(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
        """Path rules specifying redirect configuration."""
        return pulumi.get(self, "path_rules")

    @property
    @pulumi.getter(name="redirectType")
    def redirect_type(self) -> Optional[str]:
        """HTTP redirection type."""
        return pulumi.get(self, "redirect_type")

    @property
    @pulumi.getter(name="requestRoutingRules")
    def request_routing_rules(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
        """Request routing specifying redirect configuration."""
        return pulumi.get(self, "request_routing_rules")

    @property
    @pulumi.getter(name="targetListener")
    def target_listener(self) -> Optional['outputs.SubResourceResponse']:
        """Reference to a listener to redirect the request to."""
        return pulumi.get(self, "target_listener")

    @property
    @pulumi.getter(name="targetUrl")
    def target_url(self) -> Optional[str]:
        """Url to redirect the request to."""
        return pulumi.get(self, "target_url")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="urlPathMaps")
    def url_path_maps(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
        """Url path maps specifying default redirect configuration."""
        return pulumi.get(self, "url_path_maps")

    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case attribute name."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayRequestRoutingRuleResponse(dict):
    """
    Request routing rule of an application gateway.
    """
    def __init__(__self__, *,
                 backend_address_pool: Optional['outputs.SubResourceResponse'] = None,
                 backend_http_settings: Optional['outputs.SubResourceResponse'] = None,
                 etag: Optional[str] = None,
                 http_listener: Optional['outputs.SubResourceResponse'] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 redirect_configuration: Optional['outputs.SubResourceResponse'] = None,
                 rewrite_rule_set: Optional['outputs.SubResourceResponse'] = None,
                 rule_type: Optional[str] = None,
                 type: Optional[str] = None,
                 url_path_map: Optional['outputs.SubResourceResponse'] = None):
        """
        Request routing rule of an application gateway.
        :param 'SubResourceResponseArgs' backend_address_pool: Backend address pool resource of the application gateway.
        :param 'SubResourceResponseArgs' backend_http_settings: Backend http settings resource of the application gateway.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param 'SubResourceResponseArgs' http_listener: Http listener resource of the application gateway.
        :param str id: Resource ID.
        :param str name: Name of the request routing rule that is unique within an Application Gateway.
        :param str provisioning_state: Provisioning state of the request routing rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param 'SubResourceResponseArgs' redirect_configuration: Redirect configuration resource of the application gateway.
        :param 'SubResourceResponseArgs' rewrite_rule_set: Rewrite Rule Set resource in Basic rule of the application gateway.
        :param str rule_type: Rule type.
        :param str type: Type of the resource.
        :param 'SubResourceResponseArgs' url_path_map: URL path map resource of the application gateway.
        """
        # Record only properties the caller actually supplied; omitted (None)
        # values stay unset so they do not appear in the serialized output.
        provided = (
            ("backend_address_pool", backend_address_pool),
            ("backend_http_settings", backend_http_settings),
            ("etag", etag),
            ("http_listener", http_listener),
            ("id", id),
            ("name", name),
            ("provisioning_state", provisioning_state),
            ("redirect_configuration", redirect_configuration),
            ("rewrite_rule_set", rewrite_rule_set),
            ("rule_type", rule_type),
            ("type", type),
            ("url_path_map", url_path_map),
        )
        for prop_name, prop_value in provided:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="backendAddressPool")
    def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']:
        """Backend address pool resource of the application gateway."""
        return pulumi.get(self, "backend_address_pool")

    @property
    @pulumi.getter(name="backendHttpSettings")
    def backend_http_settings(self) -> Optional['outputs.SubResourceResponse']:
        """Backend http settings resource of the application gateway."""
        return pulumi.get(self, "backend_http_settings")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="httpListener")
    def http_listener(self) -> Optional['outputs.SubResourceResponse']:
        """Http listener resource of the application gateway."""
        return pulumi.get(self, "http_listener")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the request routing rule that is unique within an Application Gateway."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state of the request routing rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="redirectConfiguration")
    def redirect_configuration(self) -> Optional['outputs.SubResourceResponse']:
        """Redirect configuration resource of the application gateway."""
        return pulumi.get(self, "redirect_configuration")

    @property
    @pulumi.getter(name="rewriteRuleSet")
    def rewrite_rule_set(self) -> Optional['outputs.SubResourceResponse']:
        """Rewrite Rule Set resource in Basic rule of the application gateway."""
        return pulumi.get(self, "rewrite_rule_set")

    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> Optional[str]:
        """Rule type."""
        return pulumi.get(self, "rule_type")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="urlPathMap")
    def url_path_map(self) -> Optional['outputs.SubResourceResponse']:
        """URL path map resource of the application gateway."""
        return pulumi.get(self, "url_path_map")

    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case attribute name."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayResponse(dict):
    """
    Application gateway resource (output/response shape).

    Values are stored in the pulumi property bag under snake_case keys and
    re-exposed through the decorated read-only properties below.
    """
    def __init__(__self__, *,
                 name: str,
                 operational_state: str,
                 type: str,
                 authentication_certificates: Optional[Sequence['outputs.ApplicationGatewayAuthenticationCertificateResponse']] = None,
                 autoscale_configuration: Optional['outputs.ApplicationGatewayAutoscaleConfigurationResponse'] = None,
                 backend_address_pools: Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']] = None,
                 backend_http_settings_collection: Optional[Sequence['outputs.ApplicationGatewayBackendHttpSettingsResponse']] = None,
                 custom_error_configurations: Optional[Sequence['outputs.ApplicationGatewayCustomErrorResponse']] = None,
                 enable_fips: Optional[bool] = None,
                 enable_http2: Optional[bool] = None,
                 etag: Optional[str] = None,
                 firewall_policy: Optional['outputs.SubResourceResponse'] = None,
                 frontend_ip_configurations: Optional[Sequence['outputs.ApplicationGatewayFrontendIPConfigurationResponse']] = None,
                 frontend_ports: Optional[Sequence['outputs.ApplicationGatewayFrontendPortResponse']] = None,
                 gateway_ip_configurations: Optional[Sequence['outputs.ApplicationGatewayIPConfigurationResponse']] = None,
                 http_listeners: Optional[Sequence['outputs.ApplicationGatewayHttpListenerResponse']] = None,
                 id: Optional[str] = None,
                 identity: Optional['outputs.ManagedServiceIdentityResponse'] = None,
                 location: Optional[str] = None,
                 probes: Optional[Sequence['outputs.ApplicationGatewayProbeResponse']] = None,
                 provisioning_state: Optional[str] = None,
                 redirect_configurations: Optional[Sequence['outputs.ApplicationGatewayRedirectConfigurationResponse']] = None,
                 request_routing_rules: Optional[Sequence['outputs.ApplicationGatewayRequestRoutingRuleResponse']] = None,
                 resource_guid: Optional[str] = None,
                 rewrite_rule_sets: Optional[Sequence['outputs.ApplicationGatewayRewriteRuleSetResponse']] = None,
                 sku: Optional['outputs.ApplicationGatewaySkuResponse'] = None,
                 ssl_certificates: Optional[Sequence['outputs.ApplicationGatewaySslCertificateResponse']] = None,
                 ssl_policy: Optional['outputs.ApplicationGatewaySslPolicyResponse'] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 trusted_root_certificates: Optional[Sequence['outputs.ApplicationGatewayTrustedRootCertificateResponse']] = None,
                 url_path_maps: Optional[Sequence['outputs.ApplicationGatewayUrlPathMapResponse']] = None,
                 web_application_firewall_configuration: Optional['outputs.ApplicationGatewayWebApplicationFirewallConfigurationResponse'] = None,
                 zones: Optional[Sequence[str]] = None):
        """
        Application gateway resource.
        :param str name: Resource name.
        :param str operational_state: Operational state of the application gateway resource.
        :param str type: Resource type.
        :param Sequence['ApplicationGatewayAuthenticationCertificateResponseArgs'] authentication_certificates: Authentication certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param 'ApplicationGatewayAutoscaleConfigurationResponseArgs' autoscale_configuration: Autoscale Configuration.
        :param Sequence['ApplicationGatewayBackendAddressPoolResponseArgs'] backend_address_pools: Backend address pool of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param Sequence['ApplicationGatewayBackendHttpSettingsResponseArgs'] backend_http_settings_collection: Backend http settings of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param Sequence['ApplicationGatewayCustomErrorResponseArgs'] custom_error_configurations: Custom error configurations of the application gateway resource.
        :param bool enable_fips: Whether FIPS is enabled on the application gateway resource.
        :param bool enable_http2: Whether HTTP2 is enabled on the application gateway resource.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param 'SubResourceResponseArgs' firewall_policy: Reference of the FirewallPolicy resource.
        :param Sequence['ApplicationGatewayFrontendIPConfigurationResponseArgs'] frontend_ip_configurations: Frontend IP addresses of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param Sequence['ApplicationGatewayFrontendPortResponseArgs'] frontend_ports: Frontend ports of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param Sequence['ApplicationGatewayIPConfigurationResponseArgs'] gateway_ip_configurations: Subnets of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param Sequence['ApplicationGatewayHttpListenerResponseArgs'] http_listeners: Http listeners of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param str id: Resource ID.
        :param 'ManagedServiceIdentityResponseArgs' identity: The identity of the application gateway, if configured.
        :param str location: Resource location.
        :param Sequence['ApplicationGatewayProbeResponseArgs'] probes: Probes of the application gateway resource.
        :param str provisioning_state: Provisioning state of the application gateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param Sequence['ApplicationGatewayRedirectConfigurationResponseArgs'] redirect_configurations: Redirect configurations of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param Sequence['ApplicationGatewayRequestRoutingRuleResponseArgs'] request_routing_rules: Request routing rules of the application gateway resource.
        :param str resource_guid: Resource GUID property of the application gateway resource.
        :param Sequence['ApplicationGatewayRewriteRuleSetResponseArgs'] rewrite_rule_sets: Rewrite rules for the application gateway resource.
        :param 'ApplicationGatewaySkuResponseArgs' sku: SKU of the application gateway resource.
        :param Sequence['ApplicationGatewaySslCertificateResponseArgs'] ssl_certificates: SSL certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param 'ApplicationGatewaySslPolicyResponseArgs' ssl_policy: SSL policy of the application gateway resource.
        :param Mapping[str, str] tags: Resource tags.
        :param Sequence['ApplicationGatewayTrustedRootCertificateResponseArgs'] trusted_root_certificates: Trusted Root certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param Sequence['ApplicationGatewayUrlPathMapResponseArgs'] url_path_maps: URL path map of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        :param 'ApplicationGatewayWebApplicationFirewallConfigurationResponseArgs' web_application_firewall_configuration: Web application firewall configuration.
        :param Sequence[str] zones: A list of availability zones denoting where the resource needs to come from.
        """
        # The three required fields are always stored.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "operational_state", operational_state)
        pulumi.set(__self__, "type", type)
        # Optional fields are stored only when a value was supplied, in the same
        # order the generated code originally set them.
        for _key, _value in (
            ("authentication_certificates", authentication_certificates),
            ("autoscale_configuration", autoscale_configuration),
            ("backend_address_pools", backend_address_pools),
            ("backend_http_settings_collection", backend_http_settings_collection),
            ("custom_error_configurations", custom_error_configurations),
            ("enable_fips", enable_fips),
            ("enable_http2", enable_http2),
            ("etag", etag),
            ("firewall_policy", firewall_policy),
            ("frontend_ip_configurations", frontend_ip_configurations),
            ("frontend_ports", frontend_ports),
            ("gateway_ip_configurations", gateway_ip_configurations),
            ("http_listeners", http_listeners),
            ("id", id),
            ("identity", identity),
            ("location", location),
            ("probes", probes),
            ("provisioning_state", provisioning_state),
            ("redirect_configurations", redirect_configurations),
            ("request_routing_rules", request_routing_rules),
            ("resource_guid", resource_guid),
            ("rewrite_rule_sets", rewrite_rule_sets),
            ("sku", sku),
            ("ssl_certificates", ssl_certificates),
            ("ssl_policy", ssl_policy),
            ("tags", tags),
            ("trusted_root_certificates", trusted_root_certificates),
            ("url_path_maps", url_path_maps),
            ("web_application_firewall_configuration", web_application_firewall_configuration),
            ("zones", zones),
        ):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="operationalState")
    def operational_state(self) -> str:
        """
        Operational state of the application gateway resource.
        """
        return pulumi.get(self, "operational_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="authenticationCertificates")
    def authentication_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayAuthenticationCertificateResponse']]:
        """
        Authentication certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "authentication_certificates")

    @property
    @pulumi.getter(name="autoscaleConfiguration")
    def autoscale_configuration(self) -> Optional['outputs.ApplicationGatewayAutoscaleConfigurationResponse']:
        """
        Autoscale Configuration.
        """
        return pulumi.get(self, "autoscale_configuration")

    @property
    @pulumi.getter(name="backendAddressPools")
    def backend_address_pools(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]:
        """
        Backend address pool of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "backend_address_pools")

    @property
    @pulumi.getter(name="backendHttpSettingsCollection")
    def backend_http_settings_collection(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendHttpSettingsResponse']]:
        """
        Backend http settings of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "backend_http_settings_collection")

    @property
    @pulumi.getter(name="customErrorConfigurations")
    def custom_error_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayCustomErrorResponse']]:
        """
        Custom error configurations of the application gateway resource.
        """
        return pulumi.get(self, "custom_error_configurations")

    @property
    @pulumi.getter(name="enableFips")
    def enable_fips(self) -> Optional[bool]:
        """
        Whether FIPS is enabled on the application gateway resource.
        """
        return pulumi.get(self, "enable_fips")

    @property
    @pulumi.getter(name="enableHttp2")
    def enable_http2(self) -> Optional[bool]:
        """
        Whether HTTP2 is enabled on the application gateway resource.
        """
        return pulumi.get(self, "enable_http2")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="firewallPolicy")
    def firewall_policy(self) -> Optional['outputs.SubResourceResponse']:
        """
        Reference of the FirewallPolicy resource.
        """
        return pulumi.get(self, "firewall_policy")

    @property
    @pulumi.getter(name="frontendIPConfigurations")
    def frontend_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendIPConfigurationResponse']]:
        """
        Frontend IP addresses of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "frontend_ip_configurations")

    @property
    @pulumi.getter(name="frontendPorts")
    def frontend_ports(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendPortResponse']]:
        """
        Frontend ports of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "frontend_ports")

    @property
    @pulumi.getter(name="gatewayIPConfigurations")
    def gateway_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayIPConfigurationResponse']]:
        """
        Subnets of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "gateway_ip_configurations")

    @property
    @pulumi.getter(name="httpListeners")
    def http_listeners(self) -> Optional[Sequence['outputs.ApplicationGatewayHttpListenerResponse']]:
        """
        Http listeners of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "http_listeners")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
        """
        The identity of the application gateway, if configured.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def probes(self) -> Optional[Sequence['outputs.ApplicationGatewayProbeResponse']]:
        """
        Probes of the application gateway resource.
        """
        return pulumi.get(self, "probes")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        Provisioning state of the application gateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="redirectConfigurations")
    def redirect_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayRedirectConfigurationResponse']]:
        """
        Redirect configurations of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "redirect_configurations")

    @property
    @pulumi.getter(name="requestRoutingRules")
    def request_routing_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayRequestRoutingRuleResponse']]:
        """
        Request routing rules of the application gateway resource.
        """
        return pulumi.get(self, "request_routing_rules")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """
        Resource GUID property of the application gateway resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter(name="rewriteRuleSets")
    def rewrite_rule_sets(self) -> Optional[Sequence['outputs.ApplicationGatewayRewriteRuleSetResponse']]:
        """
        Rewrite rules for the application gateway resource.
        """
        return pulumi.get(self, "rewrite_rule_sets")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.ApplicationGatewaySkuResponse']:
        """
        SKU of the application gateway resource.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter(name="sslCertificates")
    def ssl_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewaySslCertificateResponse']]:
        """
        SSL certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "ssl_certificates")

    @property
    @pulumi.getter(name="sslPolicy")
    def ssl_policy(self) -> Optional['outputs.ApplicationGatewaySslPolicyResponse']:
        """
        SSL policy of the application gateway resource.
        """
        return pulumi.get(self, "ssl_policy")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="trustedRootCertificates")
    def trusted_root_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayTrustedRootCertificateResponse']]:
        """
        Trusted Root certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "trusted_root_certificates")

    @property
    @pulumi.getter(name="urlPathMaps")
    def url_path_maps(self) -> Optional[Sequence['outputs.ApplicationGatewayUrlPathMapResponse']]:
        """
        URL path map of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
        """
        return pulumi.get(self, "url_path_maps")

    @property
    @pulumi.getter(name="webApplicationFirewallConfiguration")
    def web_application_firewall_configuration(self) -> Optional['outputs.ApplicationGatewayWebApplicationFirewallConfigurationResponse']:
        """
        Web application firewall configuration.
        """
        return pulumi.get(self, "web_application_firewall_configuration")

    @property
    @pulumi.getter
    def zones(self) -> Optional[Sequence[str]]:
        """
        A list of availability zones denoting where the resource needs to come from.
        """
        return pulumi.get(self, "zones")

    def _translate_property(self, prop):
        # Map camelCase wire keys to snake_case attribute names; identity fallback.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayRewriteRuleActionSetResponse(dict):
    """
    Set of actions in the Rewrite Rule in Application Gateway.
    """
    def __init__(__self__, *,
                 request_header_configurations: Optional[Sequence['outputs.ApplicationGatewayHeaderConfigurationResponse']] = None,
                 response_header_configurations: Optional[Sequence['outputs.ApplicationGatewayHeaderConfigurationResponse']] = None):
        """
        Set of actions in the Rewrite Rule in Application Gateway.
        :param Sequence['ApplicationGatewayHeaderConfigurationResponseArgs'] request_header_configurations: Request Header Actions in the Action Set.
        :param Sequence['ApplicationGatewayHeaderConfigurationResponseArgs'] response_header_configurations: Response Header Actions in the Action Set.
        """
        # Only store the fields that were actually provided.
        for _key, _value in (
            ("request_header_configurations", request_header_configurations),
            ("response_header_configurations", response_header_configurations),
        ):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter(name="requestHeaderConfigurations")
    def request_header_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayHeaderConfigurationResponse']]:
        """
        Request Header Actions in the Action Set.
        """
        return pulumi.get(self, "request_header_configurations")

    @property
    @pulumi.getter(name="responseHeaderConfigurations")
    def response_header_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayHeaderConfigurationResponse']]:
        """
        Response Header Actions in the Action Set.
        """
        return pulumi.get(self, "response_header_configurations")

    def _translate_property(self, prop):
        # Map camelCase wire keys to snake_case attribute names; identity fallback.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayRewriteRuleConditionResponse(dict):
    """
    Set of conditions in the Rewrite Rule in Application Gateway.
    """
    def __init__(__self__, *,
                 ignore_case: Optional[bool] = None,
                 negate: Optional[bool] = None,
                 pattern: Optional[str] = None,
                 variable: Optional[str] = None):
        """
        Set of conditions in the Rewrite Rule in Application Gateway.
        :param bool ignore_case: Setting this parameter to true will force the pattern to do a case-insensitive comparison.
        :param bool negate: Setting this value to true will force the condition given by the user to be negated.
        :param str pattern: The pattern, either fixed string or regular expression, that evaluates the truthfulness of the condition.
        :param str variable: The condition parameter of the RewriteRuleCondition.
        """
        # All fields are optional; unset fields are omitted from the property bag.
        if ignore_case is not None:
            pulumi.set(__self__, "ignore_case", ignore_case)
        if negate is not None:
            pulumi.set(__self__, "negate", negate)
        if pattern is not None:
            pulumi.set(__self__, "pattern", pattern)
        if variable is not None:
            pulumi.set(__self__, "variable", variable)

    @property
    @pulumi.getter(name="ignoreCase")
    def ignore_case(self) -> Optional[bool]:
        """
        Setting this parameter to true will force the pattern to do a case-insensitive comparison.
        """
        return pulumi.get(self, "ignore_case")

    @property
    @pulumi.getter
    def negate(self) -> Optional[bool]:
        """
        Setting this value to true will force the condition given by the user to be negated.
        """
        return pulumi.get(self, "negate")

    @property
    @pulumi.getter
    def pattern(self) -> Optional[str]:
        """
        The pattern, either fixed string or regular expression, that evaluates the truthfulness of the condition.
        """
        return pulumi.get(self, "pattern")

    @property
    @pulumi.getter
    def variable(self) -> Optional[str]:
        """
        The condition parameter of the RewriteRuleCondition.
        """
        return pulumi.get(self, "variable")

    def _translate_property(self, prop):
        # Map camelCase wire keys to snake_case attribute names; identity fallback.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayRewriteRuleResponse(dict):
    """
    Rewrite rule of an application gateway.
    """
    def __init__(__self__, *,
                 action_set: Optional['outputs.ApplicationGatewayRewriteRuleActionSetResponse'] = None,
                 conditions: Optional[Sequence['outputs.ApplicationGatewayRewriteRuleConditionResponse']] = None,
                 name: Optional[str] = None,
                 rule_sequence: Optional[int] = None):
        """
        Rewrite rule of an application gateway.
        :param 'ApplicationGatewayRewriteRuleActionSetResponseArgs' action_set: Set of actions to be done as part of the rewrite Rule.
        :param Sequence['ApplicationGatewayRewriteRuleConditionResponseArgs'] conditions: Conditions based on which the action set execution will be evaluated.
        :param str name: Name of the rewrite rule that is unique within an Application Gateway.
        :param int rule_sequence: Rule Sequence of the rewrite rule that determines the order of execution of a particular rule in a RewriteRuleSet.
        """
        # Only store the fields that were actually provided.
        for _key, _value in (
            ("action_set", action_set),
            ("conditions", conditions),
            ("name", name),
            ("rule_sequence", rule_sequence),
        ):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter(name="actionSet")
    def action_set(self) -> Optional['outputs.ApplicationGatewayRewriteRuleActionSetResponse']:
        """
        Set of actions to be done as part of the rewrite Rule.
        """
        return pulumi.get(self, "action_set")

    @property
    @pulumi.getter
    def conditions(self) -> Optional[Sequence['outputs.ApplicationGatewayRewriteRuleConditionResponse']]:
        """
        Conditions based on which the action set execution will be evaluated.
        """
        return pulumi.get(self, "conditions")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the rewrite rule that is unique within an Application Gateway.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="ruleSequence")
    def rule_sequence(self) -> Optional[int]:
        """
        Rule Sequence of the rewrite rule that determines the order of execution of a particular rule in a RewriteRuleSet.
        """
        return pulumi.get(self, "rule_sequence")

    def _translate_property(self, prop):
        # Map camelCase wire keys to snake_case attribute names; identity fallback.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayRewriteRuleSetResponse(dict):
    """
    Rewrite rule set of an application gateway.
    """
    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 rewrite_rules: Optional[Sequence['outputs.ApplicationGatewayRewriteRuleResponse']] = None):
        """
        Rewrite rule set of an application gateway.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: Provisioning state of the rewrite rule set resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str id: Resource ID.
        :param str name: Name of the rewrite rule set that is unique within an Application Gateway.
        :param Sequence['ApplicationGatewayRewriteRuleResponseArgs'] rewrite_rules: Rewrite rules in the rewrite rule set.
        """
        # Required, server-populated fields.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are stored only when a value was supplied.
        for _key, _value in (("id", id), ("name", name), ("rewrite_rules", rewrite_rules)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state of the rewrite rule set resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the rewrite rule set that is unique within an Application Gateway.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="rewriteRules")
    def rewrite_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayRewriteRuleResponse']]:
        """
        Rewrite rules in the rewrite rule set.
        """
        return pulumi.get(self, "rewrite_rules")

    def _translate_property(self, prop):
        # Map camelCase wire keys to snake_case attribute names; identity fallback.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewaySkuResponse(dict):
    """
    SKU of an application gateway.
    """
    def __init__(__self__, *,
                 capacity: Optional[int] = None,
                 name: Optional[str] = None,
                 tier: Optional[str] = None):
        """
        SKU of an application gateway.
        :param int capacity: Capacity (instance count) of an application gateway.
        :param str name: Name of an application gateway SKU.
        :param str tier: Tier of an application gateway.
        """
        # All fields are optional; unset fields are omitted from the property bag.
        for _key, _value in (("capacity", capacity), ("name", name), ("tier", tier)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter
    def capacity(self) -> Optional[int]:
        """
        Capacity (instance count) of an application gateway.
        """
        return pulumi.get(self, "capacity")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of an application gateway SKU.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tier(self) -> Optional[str]:
        """
        Tier of an application gateway.
        """
        return pulumi.get(self, "tier")

    def _translate_property(self, prop):
        # Map camelCase wire keys to snake_case attribute names; identity fallback.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewaySslCertificateResponse(dict):
"""
SSL certificates of an application gateway.
"""
def __init__(__self__, *,
data: Optional[str] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
key_vault_secret_id: Optional[str] = None,
name: Optional[str] = None,
password: Optional[str] = None,
provisioning_state: Optional[str] = None,
public_cert_data: Optional[str] = None,
type: Optional[str] = None):
"""
SSL certificates of an application gateway.
:param str data: Base-64 encoded pfx certificate. Only applicable in PUT Request.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str key_vault_secret_id: Secret Id of (base-64 encoded unencrypted pfx) 'Secret' or 'Certificate' object stored in KeyVault.
:param str name: Name of the SSL certificate that is unique within an Application Gateway.
:param str password: Password for the pfx file specified in data. Only applicable in PUT request.
:param str provisioning_state: Provisioning state of the SSL certificate resource Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str public_cert_data: Base-64 encoded Public cert data corresponding to pfx specified in data. Only applicable in GET request.
:param str type: Type of the resource.
"""
if data is not None:
pulumi.set(__self__, "data", data)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if key_vault_secret_id is not None:
pulumi.set(__self__, "key_vault_secret_id", key_vault_secret_id)
if name is not None:
pulumi.set(__self__, "name", name)
if password is not None:
pulumi.set(__self__, "password", password)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_cert_data is not None:
pulumi.set(__self__, "public_cert_data", public_cert_data)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def data(self) -> Optional[str]:
"""
Base-64 encoded pfx certificate. Only applicable in PUT Request.
"""
return pulumi.get(self, "data")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="keyVaultSecretId")
def key_vault_secret_id(self) -> Optional[str]:
"""
Secret Id of (base-64 encoded unencrypted pfx) 'Secret' or 'Certificate' object stored in KeyVault.
"""
return pulumi.get(self, "key_vault_secret_id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the SSL certificate that is unique within an Application Gateway.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def password(self) -> Optional[str]:
"""
Password for the pfx file specified in data. Only applicable in PUT request.
"""
return pulumi.get(self, "password")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        Provisioning state of the SSL certificate resource Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        # camelCase wire field "provisioningState" -> snake_case property.
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicCertData")
    def public_cert_data(self) -> Optional[str]:
        """
        Base-64 encoded Public cert data corresponding to pfx specified in data. Only applicable in GET request.
        """
        # Read-only counterpart of `data`: populated on GET, not accepted on PUT.
        return pulumi.get(self, "public_cert_data")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Type of the resource.
        """
        # Azure resource type string; None when the provider did not return one.
        return pulumi.get(self, "type")
    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case Python name using the
        # shared lookup table; names without an entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewaySslPolicyResponse(dict):
    """SSL policy settings of an Application Gateway."""

    def __init__(__self__, *,
                 cipher_suites: Optional[Sequence[str]] = None,
                 disabled_ssl_protocols: Optional[Sequence[str]] = None,
                 min_protocol_version: Optional[str] = None,
                 policy_name: Optional[str] = None,
                 policy_type: Optional[str] = None):
        """
        SSL policy settings of an Application Gateway.

        :param cipher_suites: Cipher suites to enable, in the given order, on the gateway.
        :param disabled_ssl_protocols: SSL protocols to disable on the gateway.
        :param min_protocol_version: Minimum SSL protocol version the gateway must support.
        :param policy_name: Name of the predefined SSL policy.
        :param policy_type: Kind of SSL policy.
        """
        # Record only the fields that were actually supplied.
        for field, value in (
            ("cipher_suites", cipher_suites),
            ("disabled_ssl_protocols", disabled_ssl_protocols),
            ("min_protocol_version", min_protocol_version),
            ("policy_name", policy_name),
            ("policy_type", policy_type),
        ):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="cipherSuites")
    def cipher_suites(self) -> Optional[Sequence[str]]:
        """Cipher suites to enable, in the given order, on the gateway."""
        return pulumi.get(self, "cipher_suites")

    @property
    @pulumi.getter(name="disabledSslProtocols")
    def disabled_ssl_protocols(self) -> Optional[Sequence[str]]:
        """SSL protocols to disable on the gateway."""
        return pulumi.get(self, "disabled_ssl_protocols")

    @property
    @pulumi.getter(name="minProtocolVersion")
    def min_protocol_version(self) -> Optional[str]:
        """Minimum SSL protocol version the gateway must support."""
        return pulumi.get(self, "min_protocol_version")

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> Optional[str]:
        """Name of the predefined SSL policy."""
        return pulumi.get(self, "policy_name")

    @property
    @pulumi.getter(name="policyType")
    def policy_type(self) -> Optional[str]:
        """Kind of SSL policy."""
        return pulumi.get(self, "policy_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayTrustedRootCertificateResponse(dict):
    """A trusted root certificate of an Application Gateway."""

    def __init__(__self__, *,
                 data: Optional[str] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 key_vault_secret_id: Optional[str] = None,
                 name: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 type: Optional[str] = None):
        """
        A trusted root certificate of an Application Gateway.

        :param data: Public certificate data.
        :param etag: Read-only string that changes whenever the resource is updated.
        :param id: Resource ID.
        :param key_vault_secret_id: KeyVault secret id of a base-64 encoded unencrypted pfx 'Secret' or 'Certificate' object.
        :param name: Certificate name, unique within the Application Gateway.
        :param provisioning_state: Provisioning state; possible values: 'Updating', 'Deleting', 'Failed'.
        :param type: Type of the resource.
        """
        # Record only the fields that were actually supplied.
        for field, value in (
            ("data", data),
            ("etag", etag),
            ("id", id),
            ("key_vault_secret_id", key_vault_secret_id),
            ("name", name),
            ("provisioning_state", provisioning_state),
            ("type", type),
        ):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def data(self) -> Optional[str]:
        """Public certificate data."""
        return pulumi.get(self, "data")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="keyVaultSecretId")
    def key_vault_secret_id(self) -> Optional[str]:
        """KeyVault secret id of a base-64 encoded unencrypted pfx 'Secret' or 'Certificate' object."""
        return pulumi.get(self, "key_vault_secret_id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Certificate name, unique within the Application Gateway."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state; possible values: 'Updating', 'Deleting', 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayUrlPathMapResponse(dict):
    """URL-path-to-backend mapping used by PathBasedRouting."""

    def __init__(__self__, *,
                 default_backend_address_pool: Optional['outputs.SubResourceResponse'] = None,
                 default_backend_http_settings: Optional['outputs.SubResourceResponse'] = None,
                 default_redirect_configuration: Optional['outputs.SubResourceResponse'] = None,
                 default_rewrite_rule_set: Optional['outputs.SubResourceResponse'] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 path_rules: Optional[Sequence['outputs.ApplicationGatewayPathRuleResponse']] = None,
                 provisioning_state: Optional[str] = None,
                 type: Optional[str] = None):
        """
        URL-path-to-backend mapping used by PathBasedRouting.

        :param default_backend_address_pool: Default backend address pool of the URL path map.
        :param default_backend_http_settings: Default backend HTTP settings of the URL path map.
        :param default_redirect_configuration: Default redirect configuration of the URL path map.
        :param default_rewrite_rule_set: Default rewrite rule set of the URL path map.
        :param etag: Read-only string that changes whenever the resource is updated.
        :param id: Resource ID.
        :param name: URL path map name, unique within the Application Gateway.
        :param path_rules: Path rules of the URL path map.
        :param provisioning_state: Provisioning state; possible values: 'Updating', 'Deleting', 'Failed'.
        :param type: Type of the resource.
        """
        # Record only the fields that were actually supplied.
        for field, value in (
            ("default_backend_address_pool", default_backend_address_pool),
            ("default_backend_http_settings", default_backend_http_settings),
            ("default_redirect_configuration", default_redirect_configuration),
            ("default_rewrite_rule_set", default_rewrite_rule_set),
            ("etag", etag),
            ("id", id),
            ("name", name),
            ("path_rules", path_rules),
            ("provisioning_state", provisioning_state),
            ("type", type),
        ):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="defaultBackendAddressPool")
    def default_backend_address_pool(self) -> Optional['outputs.SubResourceResponse']:
        """Default backend address pool of the URL path map."""
        return pulumi.get(self, "default_backend_address_pool")

    @property
    @pulumi.getter(name="defaultBackendHttpSettings")
    def default_backend_http_settings(self) -> Optional['outputs.SubResourceResponse']:
        """Default backend HTTP settings of the URL path map."""
        return pulumi.get(self, "default_backend_http_settings")

    @property
    @pulumi.getter(name="defaultRedirectConfiguration")
    def default_redirect_configuration(self) -> Optional['outputs.SubResourceResponse']:
        """Default redirect configuration of the URL path map."""
        return pulumi.get(self, "default_redirect_configuration")

    @property
    @pulumi.getter(name="defaultRewriteRuleSet")
    def default_rewrite_rule_set(self) -> Optional['outputs.SubResourceResponse']:
        """Default rewrite rule set of the URL path map."""
        return pulumi.get(self, "default_rewrite_rule_set")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """URL path map name, unique within the Application Gateway."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="pathRules")
    def path_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayPathRuleResponse']]:
        """Path rules of the URL path map."""
        return pulumi.get(self, "path_rules")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state; possible values: 'Updating', 'Deleting', 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type of the resource."""
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayWebApplicationFirewallConfigurationResponse(dict):
    """Web application firewall (WAF) configuration of an Application Gateway."""

    def __init__(__self__, *,
                 enabled: bool,
                 firewall_mode: str,
                 rule_set_type: str,
                 rule_set_version: str,
                 disabled_rule_groups: Optional[Sequence['outputs.ApplicationGatewayFirewallDisabledRuleGroupResponse']] = None,
                 exclusions: Optional[Sequence['outputs.ApplicationGatewayFirewallExclusionResponse']] = None,
                 file_upload_limit_in_mb: Optional[int] = None,
                 max_request_body_size: Optional[int] = None,
                 max_request_body_size_in_kb: Optional[int] = None,
                 request_body_check: Optional[bool] = None):
        """
        Web application firewall (WAF) configuration of an Application Gateway.

        :param enabled: Whether the WAF is enabled.
        :param firewall_mode: WAF mode.
        :param rule_set_type: WAF rule set type; possible values: 'OWASP'.
        :param rule_set_version: Version of the rule set type.
        :param disabled_rule_groups: Rule groups that are disabled.
        :param exclusions: Exclusion list.
        :param file_upload_limit_in_mb: Maximum file upload size in Mb.
        :param max_request_body_size: Maximum request body size.
        :param max_request_body_size_in_kb: Maximum request body size in Kb.
        :param request_body_check: Whether the WAF may inspect the request body.
        """
        # The first four fields are mandatory and always recorded.
        pulumi.set(__self__, "enabled", enabled)
        pulumi.set(__self__, "firewall_mode", firewall_mode)
        pulumi.set(__self__, "rule_set_type", rule_set_type)
        pulumi.set(__self__, "rule_set_version", rule_set_version)
        # Optional fields are recorded only when supplied.
        for field, value in (
            ("disabled_rule_groups", disabled_rule_groups),
            ("exclusions", exclusions),
            ("file_upload_limit_in_mb", file_upload_limit_in_mb),
            ("max_request_body_size", max_request_body_size),
            ("max_request_body_size_in_kb", max_request_body_size_in_kb),
            ("request_body_check", request_body_check),
        ):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """Whether the WAF is enabled."""
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter(name="firewallMode")
    def firewall_mode(self) -> str:
        """WAF mode."""
        return pulumi.get(self, "firewall_mode")

    @property
    @pulumi.getter(name="ruleSetType")
    def rule_set_type(self) -> str:
        """WAF rule set type; possible values: 'OWASP'."""
        return pulumi.get(self, "rule_set_type")

    @property
    @pulumi.getter(name="ruleSetVersion")
    def rule_set_version(self) -> str:
        """Version of the rule set type."""
        return pulumi.get(self, "rule_set_version")

    @property
    @pulumi.getter(name="disabledRuleGroups")
    def disabled_rule_groups(self) -> Optional[Sequence['outputs.ApplicationGatewayFirewallDisabledRuleGroupResponse']]:
        """Rule groups that are disabled."""
        return pulumi.get(self, "disabled_rule_groups")

    @property
    @pulumi.getter
    def exclusions(self) -> Optional[Sequence['outputs.ApplicationGatewayFirewallExclusionResponse']]:
        """Exclusion list."""
        return pulumi.get(self, "exclusions")

    @property
    @pulumi.getter(name="fileUploadLimitInMb")
    def file_upload_limit_in_mb(self) -> Optional[int]:
        """Maximum file upload size in Mb."""
        return pulumi.get(self, "file_upload_limit_in_mb")

    @property
    @pulumi.getter(name="maxRequestBodySize")
    def max_request_body_size(self) -> Optional[int]:
        """Maximum request body size."""
        return pulumi.get(self, "max_request_body_size")

    @property
    @pulumi.getter(name="maxRequestBodySizeInKb")
    def max_request_body_size_in_kb(self) -> Optional[int]:
        """Maximum request body size in Kb."""
        return pulumi.get(self, "max_request_body_size_in_kb")

    @property
    @pulumi.getter(name="requestBodyCheck")
    def request_body_check(self) -> Optional[bool]:
        """Whether the WAF may inspect the request body."""
        return pulumi.get(self, "request_body_check")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationSecurityGroupResponse(dict):
    """An application security group within a resource group."""

    def __init__(__self__, *,
                 etag: str,
                 name: str,
                 provisioning_state: str,
                 resource_guid: str,
                 type: str,
                 id: Optional[str] = None,
                 location: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        An application security group within a resource group.

        :param etag: Read-only string that changes whenever the resource is updated.
        :param name: Resource name.
        :param provisioning_state: Provisioning state; possible values: 'Succeeded', 'Updating', 'Deleting', 'Failed'.
        :param resource_guid: Resource GUID; stable even across renames or subscription/resource-group moves.
        :param type: Resource type.
        :param id: Resource ID.
        :param location: Resource location.
        :param tags: Resource tags.
        """
        # Mandatory fields are always recorded.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "resource_guid", resource_guid)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when supplied.
        for field, value in (("id", id), ("location", location), ("tags", tags)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """Read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Resource name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Provisioning state; possible values: 'Succeeded', 'Updating', 'Deleting', 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """Resource GUID; stable even across renames or subscription/resource-group moves."""
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """Resource location."""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureFirewallApplicationRuleCollectionResponse(dict):
    """An Azure Firewall application rule collection."""

    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 action: Optional['outputs.AzureFirewallRCActionResponse'] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 priority: Optional[int] = None,
                 rules: Optional[Sequence['outputs.AzureFirewallApplicationRuleResponse']] = None):
        """
        An Azure Firewall application rule collection.

        :param etag: Read-only string that changes whenever the resource is updated.
        :param provisioning_state: Provisioning state of the resource.
        :param action: Action type of the rule collection.
        :param id: Resource ID.
        :param name: Resource name, unique within the resource group; usable to access the resource.
        :param priority: Priority of the rule collection.
        :param rules: Rules belonging to this collection.
        """
        # Mandatory fields are always recorded.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are recorded only when supplied.
        for field, value in (
            ("action", action),
            ("id", id),
            ("name", name),
            ("priority", priority),
            ("rules", rules),
        ):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """Read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def action(self) -> Optional['outputs.AzureFirewallRCActionResponse']:
        """Action type of the rule collection."""
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Resource name, unique within the resource group; usable to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def priority(self) -> Optional[int]:
        """Priority of the rule collection."""
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter
    def rules(self) -> Optional[Sequence['outputs.AzureFirewallApplicationRuleResponse']]:
        """Rules belonging to this collection."""
        return pulumi.get(self, "rules")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureFirewallApplicationRuleProtocolResponse(dict):
    """Protocol settings of an application rule."""

    def __init__(__self__, *,
                 port: Optional[int] = None,
                 protocol_type: Optional[str] = None):
        """
        Protocol settings of an application rule.

        :param port: Optional port number for the protocol; must not exceed 64000.
        :param protocol_type: Protocol type.
        """
        # Record only the fields that were actually supplied.
        for field, value in (("port", port), ("protocol_type", protocol_type)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        """Optional port number for the protocol; must not exceed 64000."""
        return pulumi.get(self, "port")

    @property
    @pulumi.getter(name="protocolType")
    def protocol_type(self) -> Optional[str]:
        """Protocol type."""
        return pulumi.get(self, "protocol_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureFirewallApplicationRuleResponse(dict):
    """An Azure Firewall application rule."""

    def __init__(__self__, *,
                 description: Optional[str] = None,
                 fqdn_tags: Optional[Sequence[str]] = None,
                 name: Optional[str] = None,
                 protocols: Optional[Sequence['outputs.AzureFirewallApplicationRuleProtocolResponse']] = None,
                 source_addresses: Optional[Sequence[str]] = None,
                 target_fqdns: Optional[Sequence[str]] = None):
        """
        An Azure Firewall application rule.

        :param description: Description of the rule.
        :param fqdn_tags: FQDN tags this rule applies to.
        :param name: Name of the application rule.
        :param protocols: Array of ApplicationRuleProtocols.
        :param source_addresses: Source IP addresses this rule applies to.
        :param target_fqdns: FQDNs this rule applies to.
        """
        # Record only the fields that were actually supplied.
        for field, value in (
            ("description", description),
            ("fqdn_tags", fqdn_tags),
            ("name", name),
            ("protocols", protocols),
            ("source_addresses", source_addresses),
            ("target_fqdns", target_fqdns),
        ):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Description of the rule."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="fqdnTags")
    def fqdn_tags(self) -> Optional[Sequence[str]]:
        """FQDN tags this rule applies to."""
        return pulumi.get(self, "fqdn_tags")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the application rule."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def protocols(self) -> Optional[Sequence['outputs.AzureFirewallApplicationRuleProtocolResponse']]:
        """Array of ApplicationRuleProtocols."""
        return pulumi.get(self, "protocols")

    @property
    @pulumi.getter(name="sourceAddresses")
    def source_addresses(self) -> Optional[Sequence[str]]:
        """Source IP addresses this rule applies to."""
        return pulumi.get(self, "source_addresses")

    @property
    @pulumi.getter(name="targetFqdns")
    def target_fqdns(self) -> Optional[Sequence[str]]:
        """FQDNs this rule applies to."""
        return pulumi.get(self, "target_fqdns")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureFirewallIPConfigurationResponse(dict):
    """IP configuration of an Azure Firewall."""

    def __init__(__self__, *,
                 etag: str,
                 private_ip_address: str,
                 provisioning_state: str,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 public_ip_address: Optional['outputs.SubResourceResponse'] = None,
                 subnet: Optional['outputs.SubResourceResponse'] = None):
        """
        IP configuration of an Azure Firewall.

        :param etag: Read-only string that changes whenever the resource is updated.
        :param private_ip_address: Firewall Internal Load Balancer IP used as the next hop in User Defined Routes.
        :param provisioning_state: Provisioning state of the resource.
        :param id: Resource ID.
        :param name: Resource name, unique within the resource group; usable to access the resource.
        :param public_ip_address: Reference to the PublicIP resource; mandatory when subnet is not null.
        :param subnet: Reference to the subnet resource; it must be named 'AzureFirewallSubnet'.
        """
        # Mandatory fields are always recorded.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "private_ip_address", private_ip_address)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are recorded only when supplied.
        for field, value in (
            ("id", id),
            ("name", name),
            ("public_ip_address", public_ip_address),
            ("subnet", subnet),
        ):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """Read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="privateIPAddress")
    def private_ip_address(self) -> str:
        """Firewall Internal Load Balancer IP used as the next hop in User Defined Routes."""
        return pulumi.get(self, "private_ip_address")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Resource name, unique within the resource group; usable to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="publicIPAddress")
    def public_ip_address(self) -> Optional['outputs.SubResourceResponse']:
        """Reference to the PublicIP resource; mandatory when subnet is not null."""
        return pulumi.get(self, "public_ip_address")

    @property
    @pulumi.getter
    def subnet(self) -> Optional['outputs.SubResourceResponse']:
        """Reference to the subnet resource; it must be named 'AzureFirewallSubnet'."""
        return pulumi.get(self, "subnet")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureFirewallNatRCActionResponse(dict):
    """Action of an Azure Firewall NAT rule collection."""

    def __init__(__self__, *,
                 type: Optional[str] = None):
        """
        Action of an Azure Firewall NAT rule collection.

        :param type: The type of action.
        """
        # Record the action type only when one was supplied.
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """The type of action."""
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureFirewallNatRuleCollectionResponse(dict):
    """An Azure Firewall NAT rule collection."""

    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 action: Optional['outputs.AzureFirewallNatRCActionResponse'] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 priority: Optional[int] = None,
                 rules: Optional[Sequence['outputs.AzureFirewallNatRuleResponse']] = None):
        """
        An Azure Firewall NAT rule collection.

        :param etag: Read-only string that changes whenever the resource is updated.
        :param provisioning_state: Provisioning state of the resource.
        :param action: Action type of the NAT rule collection.
        :param id: Resource ID.
        :param name: Resource name, unique within the resource group; usable to access the resource.
        :param priority: Priority of the NAT rule collection.
        :param rules: Rules belonging to this NAT rule collection.
        """
        # Mandatory fields are always recorded.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are recorded only when supplied.
        for field, value in (
            ("action", action),
            ("id", id),
            ("name", name),
            ("priority", priority),
            ("rules", rules),
        ):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """Read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def action(self) -> Optional['outputs.AzureFirewallNatRCActionResponse']:
        """Action type of the NAT rule collection."""
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Resource name, unique within the resource group; usable to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def priority(self) -> Optional[int]:
        """Priority of the NAT rule collection."""
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter
    def rules(self) -> Optional[Sequence['outputs.AzureFirewallNatRuleResponse']]:
        """Rules belonging to this NAT rule collection."""
        return pulumi.get(self, "rules")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureFirewallNatRuleResponse(dict):
    """An Azure Firewall NAT rule."""

    def __init__(__self__, *,
                 description: Optional[str] = None,
                 destination_addresses: Optional[Sequence[str]] = None,
                 destination_ports: Optional[Sequence[str]] = None,
                 name: Optional[str] = None,
                 protocols: Optional[Sequence[str]] = None,
                 source_addresses: Optional[Sequence[str]] = None,
                 translated_address: Optional[str] = None,
                 translated_port: Optional[str] = None):
        """
        An Azure Firewall NAT rule.

        :param description: Description of the rule.
        :param destination_addresses: Destination IP addresses; supports IP ranges, prefixes, and service tags.
        :param destination_ports: Destination ports.
        :param name: Name of the NAT rule.
        :param protocols: AzureFirewallNetworkRuleProtocols applicable to this NAT rule.
        :param source_addresses: Source IP addresses this rule applies to.
        :param translated_address: Translated address for this NAT rule.
        :param translated_port: Translated port for this NAT rule.
        """
        # Record only the fields that were actually supplied.
        for field, value in (
            ("description", description),
            ("destination_addresses", destination_addresses),
            ("destination_ports", destination_ports),
            ("name", name),
            ("protocols", protocols),
            ("source_addresses", source_addresses),
            ("translated_address", translated_address),
            ("translated_port", translated_port),
        ):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Description of the rule."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="destinationAddresses")
    def destination_addresses(self) -> Optional[Sequence[str]]:
        """Destination IP addresses; supports IP ranges, prefixes, and service tags."""
        return pulumi.get(self, "destination_addresses")

    @property
    @pulumi.getter(name="destinationPorts")
    def destination_ports(self) -> Optional[Sequence[str]]:
        """Destination ports."""
        return pulumi.get(self, "destination_ports")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the NAT rule."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def protocols(self) -> Optional[Sequence[str]]:
        """AzureFirewallNetworkRuleProtocols applicable to this NAT rule."""
        return pulumi.get(self, "protocols")

    @property
    @pulumi.getter(name="sourceAddresses")
    def source_addresses(self) -> Optional[Sequence[str]]:
        """Source IP addresses this rule applies to."""
        return pulumi.get(self, "source_addresses")

    @property
    @pulumi.getter(name="translatedAddress")
    def translated_address(self) -> Optional[str]:
        """Translated address for this NAT rule."""
        return pulumi.get(self, "translated_address")

    @property
    @pulumi.getter(name="translatedPort")
    def translated_port(self) -> Optional[str]:
        """Translated port for this NAT rule."""
        return pulumi.get(self, "translated_port")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureFirewallNetworkRuleCollectionResponse(dict):
    """
    Network rule collection resource.
    """
    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 action: Optional['outputs.AzureFirewallRCActionResponse'] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 priority: Optional[int] = None,
                 rules: Optional[Sequence['outputs.AzureFirewallNetworkRuleResponse']] = None):
        """
        Network rule collection resource.
        :param str etag: Gets a unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: The provisioning state of the resource.
        :param 'AzureFirewallRCActionResponseArgs' action: The action type of a rule collection.
        :param str id: Resource ID.
        :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param int priority: Priority of the network rule collection resource.
        :param Sequence['AzureFirewallNetworkRuleResponseArgs'] rules: Collection of rules used by a network rule collection.
        """
        # Required fields are always recorded.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are recorded only when supplied.
        for attr, value in (
                ("action", action),
                ("id", id),
                ("name", name),
                ("priority", priority),
                ("rules", rules)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """Gets a unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def action(self) -> Optional['outputs.AzureFirewallRCActionResponse']:
        """The action type of a rule collection."""
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Gets name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def priority(self) -> Optional[int]:
        """Priority of the network rule collection resource."""
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter
    def rules(self) -> Optional[Sequence['outputs.AzureFirewallNetworkRuleResponse']]:
        """Collection of rules used by a network rule collection."""
        return pulumi.get(self, "rules")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class AzureFirewallNetworkRuleResponse(dict):
    """
    Properties of the network rule.
    """
    def __init__(__self__, *,
                 description: Optional[str] = None,
                 destination_addresses: Optional[Sequence[str]] = None,
                 destination_ports: Optional[Sequence[str]] = None,
                 name: Optional[str] = None,
                 protocols: Optional[Sequence[str]] = None,
                 source_addresses: Optional[Sequence[str]] = None):
        """
        Properties of the network rule.
        :param str description: Description of the rule.
        :param Sequence[str] destination_addresses: List of destination IP addresses.
        :param Sequence[str] destination_ports: List of destination ports.
        :param str name: Name of the network rule.
        :param Sequence[str] protocols: Array of AzureFirewallNetworkRuleProtocols.
        :param Sequence[str] source_addresses: List of source IP addresses for this rule.
        """
        # Record only explicitly supplied fields; None means "absent".
        for attr, value in (
                ("description", description),
                ("destination_addresses", destination_addresses),
                ("destination_ports", destination_ports),
                ("name", name),
                ("protocols", protocols),
                ("source_addresses", source_addresses)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Description of the rule."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="destinationAddresses")
    def destination_addresses(self) -> Optional[Sequence[str]]:
        """List of destination IP addresses."""
        return pulumi.get(self, "destination_addresses")

    @property
    @pulumi.getter(name="destinationPorts")
    def destination_ports(self) -> Optional[Sequence[str]]:
        """List of destination ports."""
        return pulumi.get(self, "destination_ports")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the network rule."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def protocols(self) -> Optional[Sequence[str]]:
        """Array of AzureFirewallNetworkRuleProtocols."""
        return pulumi.get(self, "protocols")

    @property
    @pulumi.getter(name="sourceAddresses")
    def source_addresses(self) -> Optional[Sequence[str]]:
        """List of source IP addresses for this rule."""
        return pulumi.get(self, "source_addresses")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class AzureFirewallRCActionResponse(dict):
    """
    Properties of the AzureFirewallRCAction.
    """
    def __init__(__self__, *,
                 type: Optional[str] = None):
        """
        Properties of the AzureFirewallRCAction.
        :param str type: The type of action.
        """
        # Only record the action type when it was actually provided.
        if type is None:
            return
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """The type of action."""
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class BackendAddressPoolResponse(dict):
    """
    Pool of backend IP addresses.
    """
    def __init__(__self__, *,
                 backend_ip_configurations: Sequence['outputs.NetworkInterfaceIPConfigurationResponse'],
                 load_balancing_rules: Sequence['outputs.SubResourceResponse'],
                 outbound_rule: 'outputs.SubResourceResponse',
                 outbound_rules: Sequence['outputs.SubResourceResponse'],
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 provisioning_state: Optional[str] = None):
        """
        Pool of backend IP addresses.
        :param Sequence['NetworkInterfaceIPConfigurationResponseArgs'] backend_ip_configurations: Gets collection of references to IP addresses defined in network interfaces.
        :param Sequence['SubResourceResponseArgs'] load_balancing_rules: Gets load balancing rules that use this backend address pool.
        :param 'SubResourceResponseArgs' outbound_rule: Gets outbound rules that use this backend address pool.
        :param Sequence['SubResourceResponseArgs'] outbound_rules: Gets outbound rules that use this backend address pool.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str provisioning_state: Get provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        # Required fields are always recorded.
        pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations)
        pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
        pulumi.set(__self__, "outbound_rule", outbound_rule)
        pulumi.set(__self__, "outbound_rules", outbound_rules)
        # Optional fields are recorded only when supplied.
        for attr, value in (
                ("etag", etag),
                ("id", id),
                ("name", name),
                ("provisioning_state", provisioning_state)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="backendIPConfigurations")
    def backend_ip_configurations(self) -> Sequence['outputs.NetworkInterfaceIPConfigurationResponse']:
        """Gets collection of references to IP addresses defined in network interfaces."""
        return pulumi.get(self, "backend_ip_configurations")

    @property
    @pulumi.getter(name="loadBalancingRules")
    def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']:
        """Gets load balancing rules that use this backend address pool."""
        return pulumi.get(self, "load_balancing_rules")

    @property
    @pulumi.getter(name="outboundRule")
    def outbound_rule(self) -> 'outputs.SubResourceResponse':
        """Gets outbound rules that use this backend address pool."""
        return pulumi.get(self, "outbound_rule")

    @property
    @pulumi.getter(name="outboundRules")
    def outbound_rules(self) -> Sequence['outputs.SubResourceResponse']:
        """Gets outbound rules that use this backend address pool."""
        return pulumi.get(self, "outbound_rules")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Gets name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Get provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class BackendPoolResponse(dict):
    """
    A backend pool is a collection of backends that can be routed to.
    """
    def __init__(__self__, *,
                 resource_state: str,
                 type: str,
                 backends: Optional[Sequence['outputs.BackendResponse']] = None,
                 health_probe_settings: Optional['outputs.SubResourceResponse'] = None,
                 id: Optional[str] = None,
                 load_balancing_settings: Optional['outputs.SubResourceResponse'] = None,
                 name: Optional[str] = None):
        """
        A backend pool is a collection of backends that can be routed to.
        :param str resource_state: Resource status.
        :param str type: Resource type.
        :param Sequence['BackendResponseArgs'] backends: The set of backends for this pool
        :param 'SubResourceResponseArgs' health_probe_settings: L7 health probe settings for a backend pool
        :param str id: Resource ID.
        :param 'SubResourceResponseArgs' load_balancing_settings: Load balancing settings for a backend pool
        :param str name: Resource name.
        """
        # Required fields are always recorded.
        pulumi.set(__self__, "resource_state", resource_state)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when supplied.
        for attr, value in (
                ("backends", backends),
                ("health_probe_settings", health_probe_settings),
                ("id", id),
                ("load_balancing_settings", load_balancing_settings),
                ("name", name)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="resourceState")
    def resource_state(self) -> str:
        """Resource status."""
        return pulumi.get(self, "resource_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def backends(self) -> Optional[Sequence['outputs.BackendResponse']]:
        """The set of backends for this pool"""
        return pulumi.get(self, "backends")

    @property
    @pulumi.getter(name="healthProbeSettings")
    def health_probe_settings(self) -> Optional['outputs.SubResourceResponse']:
        """L7 health probe settings for a backend pool"""
        return pulumi.get(self, "health_probe_settings")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="loadBalancingSettings")
    def load_balancing_settings(self) -> Optional['outputs.SubResourceResponse']:
        """Load balancing settings for a backend pool"""
        return pulumi.get(self, "load_balancing_settings")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Resource name."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class BackendPoolsSettingsResponse(dict):
    """
    Settings that apply to all backend pools.
    """
    def __init__(__self__, *,
                 enforce_certificate_name_check: Optional[str] = None):
        """
        Settings that apply to all backend pools.
        :param str enforce_certificate_name_check: Whether to enforce certificate name check on HTTPS requests to all backend pools. No effect on non-HTTPS requests.
        """
        # Defaults to 'Enabled' when not provided; after defaulting the
        # value is never None, so it is always recorded.
        effective = 'Enabled' if enforce_certificate_name_check is None else enforce_certificate_name_check
        pulumi.set(__self__, "enforce_certificate_name_check", effective)

    @property
    @pulumi.getter(name="enforceCertificateNameCheck")
    def enforce_certificate_name_check(self) -> Optional[str]:
        """Whether to enforce certificate name check on HTTPS requests to all backend pools. No effect on non-HTTPS requests."""
        return pulumi.get(self, "enforce_certificate_name_check")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class BackendResponse(dict):
    """
    Backend address of a frontDoor load balancer.
    """
    def __init__(__self__, *,
                 address: Optional[str] = None,
                 backend_host_header: Optional[str] = None,
                 enabled_state: Optional[str] = None,
                 http_port: Optional[int] = None,
                 https_port: Optional[int] = None,
                 priority: Optional[int] = None,
                 weight: Optional[int] = None):
        """
        Backend address of a frontDoor load balancer.
        :param str address: Location of the backend (IP address or FQDN)
        :param str backend_host_header: The value to use as the host header sent to the backend. If blank or unspecified, this defaults to the incoming host.
        :param str enabled_state: Whether to enable use of this backend. Permitted values are 'Enabled' or 'Disabled'
        :param int http_port: The HTTP TCP port number. Must be between 1 and 65535.
        :param int https_port: The HTTPS TCP port number. Must be between 1 and 65535.
        :param int priority: Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy.
        :param int weight: Weight of this endpoint for load balancing purposes.
        """
        # Record only explicitly supplied fields; None means "absent".
        for attr, value in (
                ("address", address),
                ("backend_host_header", backend_host_header),
                ("enabled_state", enabled_state),
                ("http_port", http_port),
                ("https_port", https_port),
                ("priority", priority),
                ("weight", weight)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def address(self) -> Optional[str]:
        """Location of the backend (IP address or FQDN)"""
        return pulumi.get(self, "address")

    @property
    @pulumi.getter(name="backendHostHeader")
    def backend_host_header(self) -> Optional[str]:
        """The value to use as the host header sent to the backend. If blank or unspecified, this defaults to the incoming host."""
        return pulumi.get(self, "backend_host_header")

    @property
    @pulumi.getter(name="enabledState")
    def enabled_state(self) -> Optional[str]:
        """Whether to enable use of this backend. Permitted values are 'Enabled' or 'Disabled'"""
        return pulumi.get(self, "enabled_state")

    @property
    @pulumi.getter(name="httpPort")
    def http_port(self) -> Optional[int]:
        """The HTTP TCP port number. Must be between 1 and 65535."""
        return pulumi.get(self, "http_port")

    @property
    @pulumi.getter(name="httpsPort")
    def https_port(self) -> Optional[int]:
        """The HTTPS TCP port number. Must be between 1 and 65535."""
        return pulumi.get(self, "https_port")

    @property
    @pulumi.getter
    def priority(self) -> Optional[int]:
        """Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy."""
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter
    def weight(self) -> Optional[int]:
        """Weight of this endpoint for load balancing purposes."""
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class BastionHostIPConfigurationResponse(dict):
    """
    IP configuration of an Bastion Host.
    """
    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 public_ip_address: 'outputs.SubResourceResponse',
                 subnet: 'outputs.SubResourceResponse',
                 type: str,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 private_ip_allocation_method: Optional[str] = None):
        """
        IP configuration of an Bastion Host.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: The provisioning state of the resource.
        :param 'SubResourceResponseArgs' public_ip_address: Reference of the PublicIP resource.
        :param 'SubResourceResponseArgs' subnet: Reference of the subnet resource.
        :param str type: Ip configuration type.
        :param str id: Resource ID.
        :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str private_ip_allocation_method: Private IP allocation method.
        """
        # Required fields are always recorded.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "public_ip_address", public_ip_address)
        pulumi.set(__self__, "subnet", subnet)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when supplied.
        for attr, value in (
                ("id", id),
                ("name", name),
                ("private_ip_allocation_method", private_ip_allocation_method)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="publicIPAddress")
    def public_ip_address(self) -> 'outputs.SubResourceResponse':
        """Reference of the PublicIP resource."""
        return pulumi.get(self, "public_ip_address")

    @property
    @pulumi.getter
    def subnet(self) -> 'outputs.SubResourceResponse':
        """Reference of the subnet resource."""
        return pulumi.get(self, "subnet")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Ip configuration type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="privateIPAllocationMethod")
    def private_ip_allocation_method(self) -> Optional[str]:
        """Private IP allocation method."""
        return pulumi.get(self, "private_ip_allocation_method")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class BgpPeerStatusResponseResult(dict):
    """
    BGP peer status details.
    """
    def __init__(__self__, *,
                 asn: int,
                 connected_duration: str,
                 local_address: str,
                 messages_received: float,
                 messages_sent: float,
                 neighbor: str,
                 routes_received: float,
                 state: str):
        """
        BGP peer status details.
        :param int asn: The autonomous system number of the remote BGP peer.
        :param str connected_duration: For how long the peering has been up.
        :param str local_address: The virtual network gateway's local address.
        :param float messages_received: The number of BGP messages received.
        :param float messages_sent: The number of BGP messages sent.
        :param str neighbor: The remote BGP peer.
        :param float routes_received: The number of routes learned from this peer.
        :param str state: The BGP peer state.
        """
        # All fields are required; record each one unconditionally.
        for attr, value in (
                ("asn", asn),
                ("connected_duration", connected_duration),
                ("local_address", local_address),
                ("messages_received", messages_received),
                ("messages_sent", messages_sent),
                ("neighbor", neighbor),
                ("routes_received", routes_received),
                ("state", state)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def asn(self) -> int:
        """The autonomous system number of the remote BGP peer."""
        return pulumi.get(self, "asn")

    @property
    @pulumi.getter(name="connectedDuration")
    def connected_duration(self) -> str:
        """For how long the peering has been up."""
        return pulumi.get(self, "connected_duration")

    @property
    @pulumi.getter(name="localAddress")
    def local_address(self) -> str:
        """The virtual network gateway's local address."""
        return pulumi.get(self, "local_address")

    @property
    @pulumi.getter(name="messagesReceived")
    def messages_received(self) -> float:
        """The number of BGP messages received."""
        return pulumi.get(self, "messages_received")

    @property
    @pulumi.getter(name="messagesSent")
    def messages_sent(self) -> float:
        """The number of BGP messages sent."""
        return pulumi.get(self, "messages_sent")

    @property
    @pulumi.getter
    def neighbor(self) -> str:
        """The remote BGP peer."""
        return pulumi.get(self, "neighbor")

    @property
    @pulumi.getter(name="routesReceived")
    def routes_received(self) -> float:
        """The number of routes learned from this peer."""
        return pulumi.get(self, "routes_received")

    @property
    @pulumi.getter
    def state(self) -> str:
        """The BGP peer state."""
        return pulumi.get(self, "state")
@pulumi.output_type
class BgpSettingsResponse(dict):
    """
    BGP settings details.
    """
    def __init__(__self__, *,
                 asn: Optional[float] = None,
                 bgp_peering_address: Optional[str] = None,
                 peer_weight: Optional[int] = None):
        """
        BGP settings details.
        :param float asn: The BGP speaker's ASN.
        :param str bgp_peering_address: The BGP peering address and BGP identifier of this BGP speaker.
        :param int peer_weight: The weight added to routes learned from this BGP speaker.
        """
        # Record only explicitly supplied fields; None means "absent".
        for attr, value in (
                ("asn", asn),
                ("bgp_peering_address", bgp_peering_address),
                ("peer_weight", peer_weight)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def asn(self) -> Optional[float]:
        """The BGP speaker's ASN."""
        return pulumi.get(self, "asn")

    @property
    @pulumi.getter(name="bgpPeeringAddress")
    def bgp_peering_address(self) -> Optional[str]:
        """The BGP peering address and BGP identifier of this BGP speaker."""
        return pulumi.get(self, "bgp_peering_address")

    @property
    @pulumi.getter(name="peerWeight")
    def peer_weight(self) -> Optional[int]:
        """The weight added to routes learned from this BGP speaker."""
        return pulumi.get(self, "peer_weight")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class CacheConfigurationResponse(dict):
    """
    Caching settings for a caching-type route. To disable caching, do not provide a cacheConfiguration object.
    """
    def __init__(__self__, *,
                 dynamic_compression: Optional[str] = None,
                 query_parameter_strip_directive: Optional[str] = None):
        """
        Caching settings for a caching-type route. To disable caching, do not provide a cacheConfiguration object.
        :param str dynamic_compression: Whether to use dynamic compression for cached content
        :param str query_parameter_strip_directive: Treatment of URL query terms when forming the cache key.
        """
        # Record only explicitly supplied fields; None means "absent".
        for attr, value in (
                ("dynamic_compression", dynamic_compression),
                ("query_parameter_strip_directive", query_parameter_strip_directive)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="dynamicCompression")
    def dynamic_compression(self) -> Optional[str]:
        """Whether to use dynamic compression for cached content"""
        return pulumi.get(self, "dynamic_compression")

    @property
    @pulumi.getter(name="queryParameterStripDirective")
    def query_parameter_strip_directive(self) -> Optional[str]:
        """Treatment of URL query terms when forming the cache key."""
        return pulumi.get(self, "query_parameter_strip_directive")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ConnectionMonitorDestinationResponse(dict):
    """
    Describes the destination of connection monitor.
    """
    def __init__(__self__, *,
                 address: Optional[str] = None,
                 port: Optional[int] = None,
                 resource_id: Optional[str] = None):
        """
        Describes the destination of connection monitor.
        :param str address: Address of the connection monitor destination (IP or domain name).
        :param int port: The destination port used by connection monitor.
        :param str resource_id: The ID of the resource used as the destination by connection monitor.
        """
        # Record only explicitly supplied fields; None means "absent".
        for attr, value in (
                ("address", address),
                ("port", port),
                ("resource_id", resource_id)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def address(self) -> Optional[str]:
        """Address of the connection monitor destination (IP or domain name)."""
        return pulumi.get(self, "address")

    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        """The destination port used by connection monitor."""
        return pulumi.get(self, "port")

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[str]:
        """The ID of the resource used as the destination by connection monitor."""
        return pulumi.get(self, "resource_id")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ConnectionMonitorSourceResponse(dict):
    """
    Describes the source of connection monitor.
    """
    def __init__(__self__, *,
                 resource_id: str,
                 port: Optional[int] = None):
        """
        Describes the source of connection monitor.
        :param str resource_id: The ID of the resource used as the source by connection monitor.
        :param int port: The source port used by connection monitor.
        """
        # The resource id is required; the port is recorded only when supplied.
        pulumi.set(__self__, "resource_id", resource_id)
        if port is not None:
            pulumi.set(__self__, "port", port)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> str:
        """The ID of the resource used as the source by connection monitor."""
        return pulumi.get(self, "resource_id")

    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        """The source port used by connection monitor."""
        return pulumi.get(self, "port")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ContainerNetworkInterfaceConfigurationResponse(dict):
    """
    Container network interface configuration child resource.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 type: str,
                 container_network_interfaces: Optional[Sequence['outputs.SubResourceResponse']] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 ip_configurations: Optional[Sequence['outputs.IPConfigurationProfileResponse']] = None,
                 name: Optional[str] = None):
        """
        Container network interface configuration child resource.
        :param str provisioning_state: The provisioning state of the resource.
        :param str type: Sub Resource type.
        :param Sequence['SubResourceResponseArgs'] container_network_interfaces: A list of container network interfaces created from this container network interface configuration.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param Sequence['IPConfigurationProfileResponseArgs'] ip_configurations: A list of ip configurations of the container network interface configuration.
        :param str name: The name of the resource. This name can be used to access the resource.
        """
        # Required fields are always recorded.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when supplied.
        for attr, value in (
                ("container_network_interfaces", container_network_interfaces),
                ("etag", etag),
                ("id", id),
                ("ip_configurations", ip_configurations),
                ("name", name)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Sub Resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="containerNetworkInterfaces")
    def container_network_interfaces(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
        """A list of container network interfaces created from this container network interface configuration."""
        return pulumi.get(self, "container_network_interfaces")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Optional[Sequence['outputs.IPConfigurationProfileResponse']]:
        """A list of ip configurations of the container network interface configuration."""
        return pulumi.get(self, "ip_configurations")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ContainerNetworkInterfaceIpConfigurationResponse(dict):
    """
    The ip configuration for a container network interface.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 type: str,
                 etag: Optional[str] = None,
                 name: Optional[str] = None):
        """
        The ip configuration for a container network interface.
        :param str provisioning_state: The provisioning state of the resource.
        :param str type: Sub Resource type.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str name: The name of the resource. This name can be used to access the resource.
        """
        # Required fields are always recorded.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when supplied.
        for attr, value in (("etag", etag), ("name", name)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Sub Resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ContainerNetworkInterfaceResponse(dict):
    """Container network interface child resource."""

    def __init__(__self__, *,
                 provisioning_state: str,
                 type: str,
                 container: Optional['outputs.ContainerResponse'] = None,
                 container_network_interface_configuration: Optional['outputs.ContainerNetworkInterfaceConfigurationResponse'] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 ip_configurations: Optional[Sequence['outputs.ContainerNetworkInterfaceIpConfigurationResponse']] = None,
                 name: Optional[str] = None):
        """
        Container network interface child resource.

        :param str provisioning_state: The provisioning state of the resource.
        :param str type: Sub Resource type.
        :param 'ContainerResponseArgs' container: Reference to the container to which this container network interface is attached.
        :param 'ContainerNetworkInterfaceConfigurationResponseArgs' container_network_interface_configuration: Container network interface configuration from which this container network interface is created.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param Sequence['ContainerNetworkInterfaceIpConfigurationResponseArgs'] ip_configurations: Reference to the ip configuration on this container nic.
        :param str name: The name of the resource. This name can be used to access the resource.
        """
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional fields are only stored when a value was supplied.
        for key, value in (
            ("container", container),
            ("container_network_interface_configuration", container_network_interface_configuration),
            ("etag", etag),
            ("id", id),
            ("ip_configurations", ip_configurations),
            ("name", name),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Sub Resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def container(self) -> Optional['outputs.ContainerResponse']:
        """Reference to the container to which this container network interface is attached."""
        return pulumi.get(self, "container")

    @property
    @pulumi.getter(name="containerNetworkInterfaceConfiguration")
    def container_network_interface_configuration(self) -> Optional['outputs.ContainerNetworkInterfaceConfigurationResponse']:
        """Container network interface configuration from which this container network interface is created."""
        return pulumi.get(self, "container_network_interface_configuration")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Optional[Sequence['outputs.ContainerNetworkInterfaceIpConfigurationResponse']]:
        """Reference to the ip configuration on this container nic."""
        return pulumi.get(self, "ip_configurations")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ContainerResponse(dict):
    """Reference to container resource in remote resource provider."""

    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        Reference to container resource in remote resource provider.

        :param str id: Resource ID.
        """
        # Only persist the field when a value was supplied.
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class CustomHttpsConfigurationResponse(dict):
    """Https settings for a domain."""

    def __init__(__self__, *,
                 certificate_source: Optional[str] = None,
                 certificate_type: Optional[str] = None,
                 protocol_type: Optional[str] = None,
                 secret_name: Optional[str] = None,
                 secret_version: Optional[str] = None,
                 vault: Optional['outputs.KeyVaultCertificateSourceParametersResponseVault'] = None):
        """
        Https settings for a domain.

        :param str certificate_source: Defines the source of the SSL certificate
        :param str certificate_type: Defines the type of the certificate used for secure connections to a frontendEndpoint
        :param str protocol_type: Defines the TLS extension protocol that is used for secure delivery
        :param str secret_name: The name of the Key Vault secret representing the full certificate PFX
        :param str secret_version: The version of the Key Vault secret representing the full certificate PFX
        :param 'KeyVaultCertificateSourceParametersResponseVaultArgs' vault: The Key Vault containing the SSL certificate
        """
        # Optional fields are only stored when a value was supplied.
        for key, value in (
            ("certificate_source", certificate_source),
            ("certificate_type", certificate_type),
            ("protocol_type", protocol_type),
            ("secret_name", secret_name),
            ("secret_version", secret_version),
            ("vault", vault),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="certificateSource")
    def certificate_source(self) -> Optional[str]:
        """Defines the source of the SSL certificate."""
        return pulumi.get(self, "certificate_source")

    @property
    @pulumi.getter(name="certificateType")
    def certificate_type(self) -> Optional[str]:
        """Defines the type of the certificate used for secure connections to a frontendEndpoint."""
        return pulumi.get(self, "certificate_type")

    @property
    @pulumi.getter(name="protocolType")
    def protocol_type(self) -> Optional[str]:
        """Defines the TLS extension protocol that is used for secure delivery."""
        return pulumi.get(self, "protocol_type")

    @property
    @pulumi.getter(name="secretName")
    def secret_name(self) -> Optional[str]:
        """The name of the Key Vault secret representing the full certificate PFX."""
        return pulumi.get(self, "secret_name")

    @property
    @pulumi.getter(name="secretVersion")
    def secret_version(self) -> Optional[str]:
        """The version of the Key Vault secret representing the full certificate PFX."""
        return pulumi.get(self, "secret_version")

    @property
    @pulumi.getter
    def vault(self) -> Optional['outputs.KeyVaultCertificateSourceParametersResponseVault']:
        """The Key Vault containing the SSL certificate."""
        return pulumi.get(self, "vault")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class DdosSettingsResponse(dict):
    """Contains the DDoS protection settings of the public IP."""

    def __init__(__self__, *,
                 ddos_custom_policy: Optional['outputs.SubResourceResponse'] = None,
                 protection_coverage: Optional[str] = None):
        """
        Contains the DDoS protection settings of the public IP.

        :param 'SubResourceResponseArgs' ddos_custom_policy: The DDoS custom policy associated with the public IP.
        :param str protection_coverage: The DDoS protection policy customizability of the public IP. Only standard coverage will have the ability to be customized.
        """
        # Optional fields are only stored when a value was supplied.
        for key, value in (
            ("ddos_custom_policy", ddos_custom_policy),
            ("protection_coverage", protection_coverage),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="ddosCustomPolicy")
    def ddos_custom_policy(self) -> Optional['outputs.SubResourceResponse']:
        """The DDoS custom policy associated with the public IP."""
        return pulumi.get(self, "ddos_custom_policy")

    @property
    @pulumi.getter(name="protectionCoverage")
    def protection_coverage(self) -> Optional[str]:
        """The DDoS protection policy customizability of the public IP. Only standard coverage will have the ability to be customized."""
        return pulumi.get(self, "protection_coverage")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class DelegationResponse(dict):
    """Details the service to which the subnet is delegated."""

    def __init__(__self__, *,
                 provisioning_state: str,
                 actions: Optional[Sequence[str]] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 service_name: Optional[str] = None):
        """
        Details the service to which the subnet is delegated.

        :param str provisioning_state: The provisioning state of the resource.
        :param Sequence[str] actions: Describes the actions permitted to the service upon delegation.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a subnet. This name can be used to access the resource.
        :param str service_name: The name of the service to whom the subnet should be delegated (e.g. Microsoft.Sql/servers).
        """
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are only stored when a value was supplied.
        for key, value in (
            ("actions", actions),
            ("etag", etag),
            ("id", id),
            ("name", name),
            ("service_name", service_name),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def actions(self) -> Optional[Sequence[str]]:
        """Describes the actions permitted to the service upon delegation."""
        return pulumi.get(self, "actions")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource that is unique within a subnet. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> Optional[str]:
        """The name of the service to whom the subnet should be delegated (e.g. Microsoft.Sql/servers)."""
        return pulumi.get(self, "service_name")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class DevicePropertiesResponse(dict):
    """List of properties of the device."""

    def __init__(__self__, *,
                 device_model: Optional[str] = None,
                 device_vendor: Optional[str] = None,
                 link_speed_in_mbps: Optional[int] = None):
        """
        List of properties of the device.

        :param str device_model: Model of the device.
        :param str device_vendor: Name of the device Vendor.
        :param int link_speed_in_mbps: Link speed.
        """
        # Optional fields are only stored when a value was supplied.
        for key, value in (
            ("device_model", device_model),
            ("device_vendor", device_vendor),
            ("link_speed_in_mbps", link_speed_in_mbps),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="deviceModel")
    def device_model(self) -> Optional[str]:
        """Model of the device."""
        return pulumi.get(self, "device_model")

    @property
    @pulumi.getter(name="deviceVendor")
    def device_vendor(self) -> Optional[str]:
        """Name of the device Vendor."""
        return pulumi.get(self, "device_vendor")

    @property
    @pulumi.getter(name="linkSpeedInMbps")
    def link_speed_in_mbps(self) -> Optional[int]:
        """Link speed."""
        return pulumi.get(self, "link_speed_in_mbps")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class DhcpOptionsResponse(dict):
    """DhcpOptions contains an array of DNS servers available to VMs deployed in the virtual network. Standard DHCP option for a subnet overrides VNET DHCP options."""

    def __init__(__self__, *,
                 dns_servers: Optional[Sequence[str]] = None):
        """
        DhcpOptions contains an array of DNS servers available to VMs deployed in the virtual network. Standard DHCP option for a subnet overrides VNET DHCP options.

        :param Sequence[str] dns_servers: The list of DNS servers IP addresses.
        """
        # Only persist the field when a value was supplied.
        if dns_servers is not None:
            pulumi.set(__self__, "dns_servers", dns_servers)

    @property
    @pulumi.getter(name="dnsServers")
    def dns_servers(self) -> Optional[Sequence[str]]:
        """The list of DNS servers IP addresses."""
        return pulumi.get(self, "dns_servers")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteCircuitAuthorizationResponse(dict):
    """Authorization in an ExpressRouteCircuit resource."""

    def __init__(__self__, *,
                 etag: str,
                 type: str,
                 authorization_key: Optional[str] = None,
                 authorization_use_status: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 provisioning_state: Optional[str] = None):
        """
        Authorization in an ExpressRouteCircuit resource.

        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str type: Type of the resource.
        :param str authorization_key: The authorization key.
        :param str authorization_use_status: The authorization use status.
        :param str id: Resource ID.
        :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "type", type)
        # Optional fields are only stored when a value was supplied.
        for key, value in (
            ("authorization_key", authorization_key),
            ("authorization_use_status", authorization_use_status),
            ("id", id),
            ("name", name),
            ("provisioning_state", provisioning_state),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Type of the resource."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="authorizationKey")
    def authorization_key(self) -> Optional[str]:
        """The authorization key."""
        return pulumi.get(self, "authorization_key")

    @property
    @pulumi.getter(name="authorizationUseStatus")
    def authorization_use_status(self) -> Optional[str]:
        """The authorization use status."""
        return pulumi.get(self, "authorization_use_status")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Gets name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteCircuitConnectionResponse(dict):
    """Express Route Circuit Connection in an ExpressRouteCircuitPeering resource."""

    def __init__(__self__, *,
                 circuit_connection_status: str,
                 etag: str,
                 provisioning_state: str,
                 type: str,
                 address_prefix: Optional[str] = None,
                 authorization_key: Optional[str] = None,
                 express_route_circuit_peering: Optional['outputs.SubResourceResponse'] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 peer_express_route_circuit_peering: Optional['outputs.SubResourceResponse'] = None):
        """
        Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.

        :param str circuit_connection_status: Express Route Circuit connection state.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: Provisioning state of the circuit connection resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'.
        :param str type: Type of the resource.
        :param str address_prefix: /29 IP address space to carve out Customer addresses for tunnels.
        :param str authorization_key: The authorization key.
        :param 'SubResourceResponseArgs' express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
        :param str id: Resource ID.
        :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param 'SubResourceResponseArgs' peer_express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the peered circuit.
        """
        pulumi.set(__self__, "circuit_connection_status", circuit_connection_status)
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional fields are only stored when a value was supplied.
        for key, value in (
            ("address_prefix", address_prefix),
            ("authorization_key", authorization_key),
            ("express_route_circuit_peering", express_route_circuit_peering),
            ("id", id),
            ("name", name),
            ("peer_express_route_circuit_peering", peer_express_route_circuit_peering),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="circuitConnectionStatus")
    def circuit_connection_status(self) -> str:
        """Express Route Circuit connection state."""
        return pulumi.get(self, "circuit_connection_status")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Provisioning state of the circuit connection resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Type of the resource."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> Optional[str]:
        """/29 IP address space to carve out Customer addresses for tunnels."""
        return pulumi.get(self, "address_prefix")

    @property
    @pulumi.getter(name="authorizationKey")
    def authorization_key(self) -> Optional[str]:
        """The authorization key."""
        return pulumi.get(self, "authorization_key")

    @property
    @pulumi.getter(name="expressRouteCircuitPeering")
    def express_route_circuit_peering(self) -> Optional['outputs.SubResourceResponse']:
        """Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection."""
        return pulumi.get(self, "express_route_circuit_peering")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Gets name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="peerExpressRouteCircuitPeering")
    def peer_express_route_circuit_peering(self) -> Optional['outputs.SubResourceResponse']:
        """Reference to Express Route Circuit Private Peering Resource of the peered circuit."""
        return pulumi.get(self, "peer_express_route_circuit_peering")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteCircuitPeeringConfigResponse(dict):
    """Specifies the peering configuration."""

    def __init__(__self__, *,
                 advertised_communities: Optional[Sequence[str]] = None,
                 advertised_public_prefixes: Optional[Sequence[str]] = None,
                 advertised_public_prefixes_state: Optional[str] = None,
                 customer_asn: Optional[int] = None,
                 legacy_mode: Optional[int] = None,
                 routing_registry_name: Optional[str] = None):
        """
        Specifies the peering configuration.

        :param Sequence[str] advertised_communities: The communities of bgp peering. Specified for microsoft peering.
        :param Sequence[str] advertised_public_prefixes: The reference of AdvertisedPublicPrefixes.
        :param str advertised_public_prefixes_state: The advertised public prefix state of the Peering resource.
        :param int customer_asn: The CustomerASN of the peering.
        :param int legacy_mode: The legacy mode of the peering.
        :param str routing_registry_name: The RoutingRegistryName of the configuration.
        """
        # Optional fields are only stored when a value was supplied.
        for key, value in (
            ("advertised_communities", advertised_communities),
            ("advertised_public_prefixes", advertised_public_prefixes),
            ("advertised_public_prefixes_state", advertised_public_prefixes_state),
            ("customer_asn", customer_asn),
            ("legacy_mode", legacy_mode),
            ("routing_registry_name", routing_registry_name),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="advertisedCommunities")
    def advertised_communities(self) -> Optional[Sequence[str]]:
        """The communities of bgp peering. Specified for microsoft peering."""
        return pulumi.get(self, "advertised_communities")

    @property
    @pulumi.getter(name="advertisedPublicPrefixes")
    def advertised_public_prefixes(self) -> Optional[Sequence[str]]:
        """The reference of AdvertisedPublicPrefixes."""
        return pulumi.get(self, "advertised_public_prefixes")

    @property
    @pulumi.getter(name="advertisedPublicPrefixesState")
    def advertised_public_prefixes_state(self) -> Optional[str]:
        """The advertised public prefix state of the Peering resource."""
        return pulumi.get(self, "advertised_public_prefixes_state")

    @property
    @pulumi.getter(name="customerASN")
    def customer_asn(self) -> Optional[int]:
        """The CustomerASN of the peering."""
        return pulumi.get(self, "customer_asn")

    @property
    @pulumi.getter(name="legacyMode")
    def legacy_mode(self) -> Optional[int]:
        """The legacy mode of the peering."""
        return pulumi.get(self, "legacy_mode")

    @property
    @pulumi.getter(name="routingRegistryName")
    def routing_registry_name(self) -> Optional[str]:
        """The RoutingRegistryName of the configuration."""
        return pulumi.get(self, "routing_registry_name")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteCircuitPeeringIdResponse(dict):
    """ExpressRoute circuit peering identifier."""

    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        ExpressRoute circuit peering identifier.

        :param str id: The ID of the ExpressRoute circuit peering.
        """
        # Only persist the field when a value was supplied.
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """The ID of the ExpressRoute circuit peering."""
        return pulumi.get(self, "id")

    def _translate_property(self, prop):
        # Map camelCase wire names onto snake_case attribute names; fall back to the input.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteCircuitPeeringResponse(dict):
"""
Peering in an ExpressRouteCircuit resource.
"""
def __init__(__self__, *,
etag: str,
peered_connections: Sequence['outputs.PeerExpressRouteCircuitConnectionResponse'],
type: str,
azure_asn: Optional[int] = None,
connections: Optional[Sequence['outputs.ExpressRouteCircuitConnectionResponse']] = None,
express_route_connection: Optional['outputs.ExpressRouteConnectionIdResponse'] = None,
gateway_manager_etag: Optional[str] = None,
id: Optional[str] = None,
ipv6_peering_config: Optional['outputs.Ipv6ExpressRouteCircuitPeeringConfigResponse'] = None,
last_modified_by: Optional[str] = None,
microsoft_peering_config: Optional['outputs.ExpressRouteCircuitPeeringConfigResponse'] = None,
name: Optional[str] = None,
peer_asn: Optional[float] = None,
peering_type: Optional[str] = None,
primary_azure_port: Optional[str] = None,
primary_peer_address_prefix: Optional[str] = None,
provisioning_state: Optional[str] = None,
route_filter: Optional['outputs.SubResourceResponse'] = None,
secondary_azure_port: Optional[str] = None,
secondary_peer_address_prefix: Optional[str] = None,
shared_key: Optional[str] = None,
state: Optional[str] = None,
stats: Optional['outputs.ExpressRouteCircuitStatsResponse'] = None,
vlan_id: Optional[int] = None):
"""
Peering in an ExpressRouteCircuit resource.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param Sequence['PeerExpressRouteCircuitConnectionResponseArgs'] peered_connections: The list of peered circuit connections associated with Azure Private Peering for this circuit.
:param str type: Type of the resource.
:param int azure_asn: The Azure ASN.
:param Sequence['ExpressRouteCircuitConnectionResponseArgs'] connections: The list of circuit connections associated with Azure Private Peering for this circuit.
:param 'ExpressRouteConnectionIdResponseArgs' express_route_connection: The ExpressRoute connection.
:param str gateway_manager_etag: The GatewayManager Etag.
:param str id: Resource ID.
:param 'Ipv6ExpressRouteCircuitPeeringConfigResponseArgs' ipv6_peering_config: The IPv6 peering configuration.
:param str last_modified_by: Gets whether the provider or the customer last modified the peering.
:param 'ExpressRouteCircuitPeeringConfigResponseArgs' microsoft_peering_config: The Microsoft peering configuration.
:param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param float peer_asn: The peer ASN.
:param str peering_type: The peering type.
:param str primary_azure_port: The primary port.
:param str primary_peer_address_prefix: The primary address prefix.
:param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param 'SubResourceResponseArgs' route_filter: The reference of the RouteFilter resource.
:param str secondary_azure_port: The secondary port.
:param str secondary_peer_address_prefix: The secondary address prefix.
:param str shared_key: The shared key.
:param str state: The peering state.
:param 'ExpressRouteCircuitStatsResponseArgs' stats: Gets peering stats.
:param int vlan_id: The VLAN ID.
"""
pulumi.set(__self__, "etag", etag)
pulumi.set(__self__, "peered_connections", peered_connections)
pulumi.set(__self__, "type", type)
if azure_asn is not None:
pulumi.set(__self__, "azure_asn", azure_asn)
if connections is not None:
pulumi.set(__self__, "connections", connections)
if express_route_connection is not None:
pulumi.set(__self__, "express_route_connection", express_route_connection)
if gateway_manager_etag is not None:
pulumi.set(__self__, "gateway_manager_etag", gateway_manager_etag)
if id is not None:
pulumi.set(__self__, "id", id)
if ipv6_peering_config is not None:
pulumi.set(__self__, "ipv6_peering_config", ipv6_peering_config)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if microsoft_peering_config is not None:
pulumi.set(__self__, "microsoft_peering_config", microsoft_peering_config)
if name is not None:
pulumi.set(__self__, "name", name)
if peer_asn is not None:
pulumi.set(__self__, "peer_asn", peer_asn)
if peering_type is not None:
pulumi.set(__self__, "peering_type", peering_type)
if primary_azure_port is not None:
pulumi.set(__self__, "primary_azure_port", primary_azure_port)
if primary_peer_address_prefix is not None:
pulumi.set(__self__, "primary_peer_address_prefix", primary_peer_address_prefix)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if route_filter is not None:
pulumi.set(__self__, "route_filter", route_filter)
if secondary_azure_port is not None:
pulumi.set(__self__, "secondary_azure_port", secondary_azure_port)
if secondary_peer_address_prefix is not None:
pulumi.set(__self__, "secondary_peer_address_prefix", secondary_peer_address_prefix)
if shared_key is not None:
pulumi.set(__self__, "shared_key", shared_key)
if state is not None:
pulumi.set(__self__, "state", state)
if stats is not None:
pulumi.set(__self__, "stats", stats)
if vlan_id is not None:
pulumi.set(__self__, "vlan_id", vlan_id)
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="peeredConnections")
def peered_connections(self) -> Sequence['outputs.PeerExpressRouteCircuitConnectionResponse']:
"""
The list of peered circuit connections associated with Azure Private Peering for this circuit.
"""
return pulumi.get(self, "peered_connections")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="azureASN")
def azure_asn(self) -> Optional[int]:
"""
The Azure ASN.
"""
return pulumi.get(self, "azure_asn")
@property
@pulumi.getter
def connections(self) -> Optional[Sequence['outputs.ExpressRouteCircuitConnectionResponse']]:
"""
The list of circuit connections associated with Azure Private Peering for this circuit.
"""
return pulumi.get(self, "connections")
@property
@pulumi.getter(name="expressRouteConnection")
def express_route_connection(self) -> Optional['outputs.ExpressRouteConnectionIdResponse']:
"""
The ExpressRoute connection.
"""
return pulumi.get(self, "express_route_connection")
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> Optional[str]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipv6PeeringConfig")
def ipv6_peering_config(self) -> Optional['outputs.Ipv6ExpressRouteCircuitPeeringConfigResponse']:
"""
The IPv6 peering configuration.
"""
return pulumi.get(self, "ipv6_peering_config")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
Gets whether the provider or the customer last modified the peering.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="microsoftPeeringConfig")
def microsoft_peering_config(self) -> Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']:
"""
The Microsoft peering configuration.
"""
return pulumi.get(self, "microsoft_peering_config")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerASN")
def peer_asn(self) -> Optional[float]:
"""
The peer ASN.
"""
return pulumi.get(self, "peer_asn")
@property
@pulumi.getter(name="peeringType")
def peering_type(self) -> Optional[str]:
"""
The peering type.
"""
return pulumi.get(self, "peering_type")
@property
@pulumi.getter(name="primaryAzurePort")
def primary_azure_port(self) -> Optional[str]:
"""
The primary port.
"""
return pulumi.get(self, "primary_azure_port")
@property
@pulumi.getter(name="primaryPeerAddressPrefix")
def primary_peer_address_prefix(self) -> Optional[str]:
"""
The primary address prefix.
"""
return pulumi.get(self, "primary_peer_address_prefix")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routeFilter")
def route_filter(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference of the RouteFilter resource.
"""
return pulumi.get(self, "route_filter")
@property
@pulumi.getter(name="secondaryAzurePort")
def secondary_azure_port(self) -> Optional[str]:
"""
The secondary port.
"""
return pulumi.get(self, "secondary_azure_port")
@property
@pulumi.getter(name="secondaryPeerAddressPrefix")
def secondary_peer_address_prefix(self) -> Optional[str]:
"""
The secondary address prefix.
"""
return pulumi.get(self, "secondary_peer_address_prefix")
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> Optional[str]:
"""
The shared key.
"""
return pulumi.get(self, "shared_key")
@property
@pulumi.getter
def state(self) -> Optional[str]:
"""
The peering state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def stats(self) -> Optional['outputs.ExpressRouteCircuitStatsResponse']:
"""
Gets peering stats.
"""
return pulumi.get(self, "stats")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> Optional[int]:
"""
The VLAN ID.
"""
return pulumi.get(self, "vlan_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ExpressRouteCircuitServiceProviderPropertiesResponse(dict):
    """Contains ServiceProviderProperties in an ExpressRouteCircuit."""

    def __init__(__self__, *,
                 bandwidth_in_mbps: Optional[int] = None,
                 peering_location: Optional[str] = None,
                 service_provider_name: Optional[str] = None):
        """
        :param int bandwidth_in_mbps: The BandwidthInMbps.
        :param str peering_location: The peering location.
        :param str service_provider_name: The serviceProviderName.
        """
        # Store only the fields that were actually supplied.
        provided = (
            ("bandwidth_in_mbps", bandwidth_in_mbps),
            ("peering_location", peering_location),
            ("service_provider_name", service_provider_name),
        )
        for attr_name, attr_value in provided:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="bandwidthInMbps")
    def bandwidth_in_mbps(self) -> Optional[int]:
        """The BandwidthInMbps."""
        return pulumi.get(self, "bandwidth_in_mbps")

    @property
    @pulumi.getter(name="peeringLocation")
    def peering_location(self) -> Optional[str]:
        """The peering location."""
        return pulumi.get(self, "peering_location")

    @property
    @pulumi.getter(name="serviceProviderName")
    def service_provider_name(self) -> Optional[str]:
        """The serviceProviderName."""
        return pulumi.get(self, "service_provider_name")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteCircuitSkuResponse(dict):
    """Contains SKU in an ExpressRouteCircuit."""

    def __init__(__self__, *,
                 family: Optional[str] = None,
                 name: Optional[str] = None,
                 tier: Optional[str] = None):
        """
        :param str family: The family of the SKU.
        :param str name: The name of the SKU.
        :param str tier: The tier of the SKU.
        """
        # Record only explicitly supplied SKU fields.
        for attr_name, attr_value in (("family", family), ("name", name), ("tier", tier)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def family(self) -> Optional[str]:
        """The family of the SKU."""
        return pulumi.get(self, "family")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the SKU."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tier(self) -> Optional[str]:
        """The tier of the SKU."""
        return pulumi.get(self, "tier")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteCircuitStatsResponse(dict):
    """Contains stats associated with the peering."""

    def __init__(__self__, *,
                 primarybytes_in: Optional[float] = None,
                 primarybytes_out: Optional[float] = None,
                 secondarybytes_in: Optional[float] = None,
                 secondarybytes_out: Optional[float] = None):
        """
        :param float primarybytes_in: Gets BytesIn of the peering.
        :param float primarybytes_out: Gets BytesOut of the peering.
        :param float secondarybytes_in: Gets BytesIn of the peering.
        :param float secondarybytes_out: Gets BytesOut of the peering.
        """
        # Persist only counters that were provided.
        counters = {
            "primarybytes_in": primarybytes_in,
            "primarybytes_out": primarybytes_out,
            "secondarybytes_in": secondarybytes_in,
            "secondarybytes_out": secondarybytes_out,
        }
        for attr_name, attr_value in counters.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="primarybytesIn")
    def primarybytes_in(self) -> Optional[float]:
        """Gets BytesIn of the peering."""
        return pulumi.get(self, "primarybytes_in")

    @property
    @pulumi.getter(name="primarybytesOut")
    def primarybytes_out(self) -> Optional[float]:
        """Gets BytesOut of the peering."""
        return pulumi.get(self, "primarybytes_out")

    @property
    @pulumi.getter(name="secondarybytesIn")
    def secondarybytes_in(self) -> Optional[float]:
        """Gets BytesIn of the peering."""
        return pulumi.get(self, "secondarybytes_in")

    @property
    @pulumi.getter(name="secondarybytesOut")
    def secondarybytes_out(self) -> Optional[float]:
        """Gets BytesOut of the peering."""
        return pulumi.get(self, "secondarybytes_out")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteConnectionIdResponse(dict):
    """The ID of the ExpressRouteConnection."""

    def __init__(__self__, *,
                 id: str):
        """
        :param str id: The ID of the ExpressRouteConnection.
        """
        # `id` is required; always store it.
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> str:
        """The ID of the ExpressRouteConnection."""
        connection_id = pulumi.get(self, "id")
        return connection_id

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteConnectionResponse(dict):
    """ExpressRouteConnection resource."""

    def __init__(__self__, *,
                 express_route_circuit_peering: 'outputs.ExpressRouteCircuitPeeringIdResponse',
                 name: str,
                 provisioning_state: str,
                 authorization_key: Optional[str] = None,
                 id: Optional[str] = None,
                 routing_weight: Optional[int] = None):
        """
        :param 'ExpressRouteCircuitPeeringIdResponseArgs' express_route_circuit_peering: The ExpressRoute circuit peering.
        :param str name: The name of the resource.
        :param str provisioning_state: The provisioning state of the resource.
        :param str authorization_key: Authorization key to establish the connection.
        :param str id: Resource ID.
        :param int routing_weight: The routing weight associated to the connection.
        """
        # Required fields are always stored.
        pulumi.set(__self__, "express_route_circuit_peering", express_route_circuit_peering)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are stored only when supplied.
        optional_fields = (
            ("authorization_key", authorization_key),
            ("id", id),
            ("routing_weight", routing_weight),
        )
        for attr_name, attr_value in optional_fields:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="expressRouteCircuitPeering")
    def express_route_circuit_peering(self) -> 'outputs.ExpressRouteCircuitPeeringIdResponse':
        """The ExpressRoute circuit peering."""
        return pulumi.get(self, "express_route_circuit_peering")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="authorizationKey")
    def authorization_key(self) -> Optional[str]:
        """Authorization key to establish the connection."""
        return pulumi.get(self, "authorization_key")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="routingWeight")
    def routing_weight(self) -> Optional[int]:
        """The routing weight associated to the connection."""
        return pulumi.get(self, "routing_weight")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteGatewayPropertiesResponseAutoScaleConfiguration(dict):
    """Configuration for auto scaling."""

    def __init__(__self__, *,
                 bounds: Optional['outputs.ExpressRouteGatewayPropertiesResponseBounds'] = None):
        """
        :param 'ExpressRouteGatewayPropertiesResponseBoundsArgs' bounds: Minimum and maximum number of scale units to deploy.
        """
        # Store bounds only when they were supplied.
        if bounds is not None:
            pulumi.set(__self__, "bounds", bounds)

    @property
    @pulumi.getter
    def bounds(self) -> Optional['outputs.ExpressRouteGatewayPropertiesResponseBounds']:
        """Minimum and maximum number of scale units to deploy."""
        scale_bounds = pulumi.get(self, "bounds")
        return scale_bounds

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteGatewayPropertiesResponseBounds(dict):
    """Minimum and maximum number of scale units to deploy."""

    def __init__(__self__, *,
                 max: Optional[int] = None,
                 min: Optional[int] = None):
        """
        :param int max: Maximum number of scale units deployed for ExpressRoute gateway.
        :param int min: Minimum number of scale units deployed for ExpressRoute gateway.
        """
        # Store only bounds that were supplied.
        for attr_name, attr_value in (("max", max), ("min", min)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def max(self) -> Optional[int]:
        """Maximum number of scale units deployed for ExpressRoute gateway."""
        return pulumi.get(self, "max")

    @property
    @pulumi.getter
    def min(self) -> Optional[int]:
        """Minimum number of scale units deployed for ExpressRoute gateway."""
        return pulumi.get(self, "min")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExpressRouteLinkResponse(dict):
    """ExpressRouteLink child resource definition."""

    def __init__(__self__, *,
                 connector_type: str,
                 etag: str,
                 interface_name: str,
                 patch_panel_id: str,
                 provisioning_state: str,
                 rack_id: str,
                 router_name: str,
                 admin_state: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None):
        """
        :param str connector_type: Physical fiber port type.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str interface_name: Name of Azure router interface.
        :param str patch_panel_id: Mapping between physical port to patch panel port.
        :param str provisioning_state: The provisioning state of the ExpressRouteLink resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'.
        :param str rack_id: Mapping of physical patch panel to rack.
        :param str router_name: Name of Azure router associated with physical port.
        :param str admin_state: Administrative state of the physical port.
        :param str id: Resource ID.
        :param str name: Name of child port resource that is unique among child port resources of the parent.
        """
        # Required fields are stored unconditionally.
        required_fields = (
            ("connector_type", connector_type),
            ("etag", etag),
            ("interface_name", interface_name),
            ("patch_panel_id", patch_panel_id),
            ("provisioning_state", provisioning_state),
            ("rack_id", rack_id),
            ("router_name", router_name),
        )
        for attr_name, attr_value in required_fields:
            pulumi.set(__self__, attr_name, attr_value)
        # Optional fields are stored only when supplied.
        optional_fields = (
            ("admin_state", admin_state),
            ("id", id),
            ("name", name),
        )
        for attr_name, attr_value in optional_fields:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="connectorType")
    def connector_type(self) -> str:
        """Physical fiber port type."""
        return pulumi.get(self, "connector_type")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="interfaceName")
    def interface_name(self) -> str:
        """Name of Azure router interface."""
        return pulumi.get(self, "interface_name")

    @property
    @pulumi.getter(name="patchPanelId")
    def patch_panel_id(self) -> str:
        """Mapping between physical port to patch panel port."""
        return pulumi.get(self, "patch_panel_id")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the ExpressRouteLink resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="rackId")
    def rack_id(self) -> str:
        """Mapping of physical patch panel to rack."""
        return pulumi.get(self, "rack_id")

    @property
    @pulumi.getter(name="routerName")
    def router_name(self) -> str:
        """Name of Azure router associated with physical port."""
        return pulumi.get(self, "router_name")

    @property
    @pulumi.getter(name="adminState")
    def admin_state(self) -> Optional[str]:
        """Administrative state of the physical port."""
        return pulumi.get(self, "admin_state")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of child port resource that is unique among child port resources of the parent."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ForwardingConfigurationResponse(dict):
    """Describes Forwarding Route."""

    def __init__(__self__, *,
                 odata_type: str,
                 backend_pool: Optional['outputs.SubResourceResponse'] = None,
                 cache_configuration: Optional['outputs.CacheConfigurationResponse'] = None,
                 custom_forwarding_path: Optional[str] = None,
                 forwarding_protocol: Optional[str] = None):
        """
        :param str odata_type:
               Expected value is '#Microsoft.Azure.FrontDoor.Models.FrontdoorForwardingConfiguration'.
        :param 'SubResourceResponseArgs' backend_pool: A reference to the BackendPool which this rule routes to.
        :param 'CacheConfigurationResponseArgs' cache_configuration: The caching configuration associated with this rule.
        :param str custom_forwarding_path: A custom path used to rewrite resource paths matched by this rule. Leave empty to use incoming path.
        :param str forwarding_protocol: Protocol this rule will use when forwarding traffic to backends.
        """
        # The odata discriminator is pinned to this literal regardless of the argument value.
        pulumi.set(__self__, "odata_type", '#Microsoft.Azure.FrontDoor.Models.FrontdoorForwardingConfiguration')
        # Optional fields are stored only when supplied.
        optional_fields = (
            ("backend_pool", backend_pool),
            ("cache_configuration", cache_configuration),
            ("custom_forwarding_path", custom_forwarding_path),
            ("forwarding_protocol", forwarding_protocol),
        )
        for attr_name, attr_value in optional_fields:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> str:
        """Expected value is '#Microsoft.Azure.FrontDoor.Models.FrontdoorForwardingConfiguration'."""
        return pulumi.get(self, "odata_type")

    @property
    @pulumi.getter(name="backendPool")
    def backend_pool(self) -> Optional['outputs.SubResourceResponse']:
        """A reference to the BackendPool which this rule routes to."""
        return pulumi.get(self, "backend_pool")

    @property
    @pulumi.getter(name="cacheConfiguration")
    def cache_configuration(self) -> Optional['outputs.CacheConfigurationResponse']:
        """The caching configuration associated with this rule."""
        return pulumi.get(self, "cache_configuration")

    @property
    @pulumi.getter(name="customForwardingPath")
    def custom_forwarding_path(self) -> Optional[str]:
        """A custom path used to rewrite resource paths matched by this rule. Leave empty to use incoming path."""
        return pulumi.get(self, "custom_forwarding_path")

    @property
    @pulumi.getter(name="forwardingProtocol")
    def forwarding_protocol(self) -> Optional[str]:
        """Protocol this rule will use when forwarding traffic to backends."""
        return pulumi.get(self, "forwarding_protocol")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class FrontendEndpointResponse(dict):
    """A frontend endpoint used for routing."""

    def __init__(__self__, *,
                 custom_https_configuration: 'outputs.CustomHttpsConfigurationResponse',
                 custom_https_provisioning_state: str,
                 custom_https_provisioning_substate: str,
                 resource_state: str,
                 type: str,
                 host_name: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 session_affinity_enabled_state: Optional[str] = None,
                 session_affinity_ttl_seconds: Optional[int] = None,
                 web_application_firewall_policy_link: Optional['outputs.FrontendEndpointUpdateParametersResponseWebApplicationFirewallPolicyLink'] = None):
        """
        :param 'CustomHttpsConfigurationResponseArgs' custom_https_configuration: The configuration specifying how to enable HTTPS
        :param str custom_https_provisioning_state: Provisioning status of Custom Https of the frontendEndpoint.
        :param str custom_https_provisioning_substate: Provisioning substate shows the progress of custom HTTPS enabling/disabling process step by step.
        :param str resource_state: Resource status.
        :param str type: Resource type.
        :param str host_name: The host name of the frontendEndpoint. Must be a domain name.
        :param str id: Resource ID.
        :param str name: Resource name.
        :param str session_affinity_enabled_state: Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled'
        :param int session_affinity_ttl_seconds: UNUSED. This field will be ignored. The TTL to use in seconds for session affinity, if applicable.
        :param 'FrontendEndpointUpdateParametersResponseWebApplicationFirewallPolicyLinkArgs' web_application_firewall_policy_link: Defines the Web Application Firewall policy for each host (if applicable)
        """
        # Required fields are stored unconditionally.
        required_fields = (
            ("custom_https_configuration", custom_https_configuration),
            ("custom_https_provisioning_state", custom_https_provisioning_state),
            ("custom_https_provisioning_substate", custom_https_provisioning_substate),
            ("resource_state", resource_state),
            ("type", type),
        )
        for attr_name, attr_value in required_fields:
            pulumi.set(__self__, attr_name, attr_value)
        # Optional fields are stored only when supplied.
        optional_fields = (
            ("host_name", host_name),
            ("id", id),
            ("name", name),
            ("session_affinity_enabled_state", session_affinity_enabled_state),
            ("session_affinity_ttl_seconds", session_affinity_ttl_seconds),
            ("web_application_firewall_policy_link", web_application_firewall_policy_link),
        )
        for attr_name, attr_value in optional_fields:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="customHttpsConfiguration")
    def custom_https_configuration(self) -> 'outputs.CustomHttpsConfigurationResponse':
        """The configuration specifying how to enable HTTPS"""
        return pulumi.get(self, "custom_https_configuration")

    @property
    @pulumi.getter(name="customHttpsProvisioningState")
    def custom_https_provisioning_state(self) -> str:
        """Provisioning status of Custom Https of the frontendEndpoint."""
        return pulumi.get(self, "custom_https_provisioning_state")

    @property
    @pulumi.getter(name="customHttpsProvisioningSubstate")
    def custom_https_provisioning_substate(self) -> str:
        """Provisioning substate shows the progress of custom HTTPS enabling/disabling process step by step."""
        return pulumi.get(self, "custom_https_provisioning_substate")

    @property
    @pulumi.getter(name="resourceState")
    def resource_state(self) -> str:
        """Resource status."""
        return pulumi.get(self, "resource_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> Optional[str]:
        """The host name of the frontendEndpoint. Must be a domain name."""
        return pulumi.get(self, "host_name")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Resource name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="sessionAffinityEnabledState")
    def session_affinity_enabled_state(self) -> Optional[str]:
        """Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled'"""
        return pulumi.get(self, "session_affinity_enabled_state")

    @property
    @pulumi.getter(name="sessionAffinityTtlSeconds")
    def session_affinity_ttl_seconds(self) -> Optional[int]:
        """UNUSED. This field will be ignored. The TTL to use in seconds for session affinity, if applicable."""
        return pulumi.get(self, "session_affinity_ttl_seconds")

    @property
    @pulumi.getter(name="webApplicationFirewallPolicyLink")
    def web_application_firewall_policy_link(self) -> Optional['outputs.FrontendEndpointUpdateParametersResponseWebApplicationFirewallPolicyLink']:
        """Defines the Web Application Firewall policy for each host (if applicable)"""
        return pulumi.get(self, "web_application_firewall_policy_link")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class FrontendEndpointUpdateParametersResponseWebApplicationFirewallPolicyLink(dict):
    """Defines the Web Application Firewall policy for each host (if applicable)"""

    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        :param str id: Resource ID.
        """
        # Store the ID only when it was supplied.
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        policy_id = pulumi.get(self, "id")
        return policy_id

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class FrontendIPConfigurationResponse(dict):
    """Frontend IP address of the load balancer."""

    def __init__(__self__, *,
                 inbound_nat_pools: Sequence['outputs.SubResourceResponse'],
                 inbound_nat_rules: Sequence['outputs.SubResourceResponse'],
                 load_balancing_rules: Sequence['outputs.SubResourceResponse'],
                 outbound_rules: Sequence['outputs.SubResourceResponse'],
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 private_ip_address: Optional[str] = None,
                 private_ip_address_version: Optional[str] = None,
                 private_ip_allocation_method: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None,
                 public_ip_prefix: Optional['outputs.SubResourceResponse'] = None,
                 subnet: Optional['outputs.SubnetResponse'] = None,
                 zones: Optional[Sequence[str]] = None):
        """
        :param Sequence['SubResourceResponseArgs'] inbound_nat_pools: Read only. Inbound pools URIs that use this frontend IP.
        :param Sequence['SubResourceResponseArgs'] inbound_nat_rules: Read only. Inbound rules URIs that use this frontend IP.
        :param Sequence['SubResourceResponseArgs'] load_balancing_rules: Gets load balancing rules URIs that use this frontend IP.
        :param Sequence['SubResourceResponseArgs'] outbound_rules: Read only. Outbound rules URIs that use this frontend IP.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str private_ip_address: The private IP address of the IP configuration.
        :param str private_ip_address_version: It represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4.
        :param str private_ip_allocation_method: The Private IP allocation method.
        :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param 'PublicIPAddressResponseArgs' public_ip_address: The reference of the Public IP resource.
        :param 'SubResourceResponseArgs' public_ip_prefix: The reference of the Public IP Prefix resource.
        :param 'SubnetResponseArgs' subnet: The reference of the subnet resource.
        :param Sequence[str] zones: A list of availability zones denoting the IP allocated for the resource needs to come from.
        """
        # Required fields are stored unconditionally.
        required_fields = (
            ("inbound_nat_pools", inbound_nat_pools),
            ("inbound_nat_rules", inbound_nat_rules),
            ("load_balancing_rules", load_balancing_rules),
            ("outbound_rules", outbound_rules),
        )
        for attr_name, attr_value in required_fields:
            pulumi.set(__self__, attr_name, attr_value)
        # Optional fields are stored only when supplied.
        optional_fields = (
            ("etag", etag),
            ("id", id),
            ("name", name),
            ("private_ip_address", private_ip_address),
            ("private_ip_address_version", private_ip_address_version),
            ("private_ip_allocation_method", private_ip_allocation_method),
            ("provisioning_state", provisioning_state),
            ("public_ip_address", public_ip_address),
            ("public_ip_prefix", public_ip_prefix),
            ("subnet", subnet),
            ("zones", zones),
        )
        for attr_name, attr_value in optional_fields:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="inboundNatPools")
    def inbound_nat_pools(self) -> Sequence['outputs.SubResourceResponse']:
        """Read only. Inbound pools URIs that use this frontend IP."""
        return pulumi.get(self, "inbound_nat_pools")

    @property
    @pulumi.getter(name="inboundNatRules")
    def inbound_nat_rules(self) -> Sequence['outputs.SubResourceResponse']:
        """Read only. Inbound rules URIs that use this frontend IP."""
        return pulumi.get(self, "inbound_nat_rules")

    @property
    @pulumi.getter(name="loadBalancingRules")
    def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']:
        """Gets load balancing rules URIs that use this frontend IP."""
        return pulumi.get(self, "load_balancing_rules")

    @property
    @pulumi.getter(name="outboundRules")
    def outbound_rules(self) -> Sequence['outputs.SubResourceResponse']:
        """Read only. Outbound rules URIs that use this frontend IP."""
        return pulumi.get(self, "outbound_rules")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="privateIPAddress")
    def private_ip_address(self) -> Optional[str]:
        """The private IP address of the IP configuration."""
        return pulumi.get(self, "private_ip_address")

    @property
    @pulumi.getter(name="privateIPAddressVersion")
    def private_ip_address_version(self) -> Optional[str]:
        """It represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4."""
        return pulumi.get(self, "private_ip_address_version")

    @property
    @pulumi.getter(name="privateIPAllocationMethod")
    def private_ip_allocation_method(self) -> Optional[str]:
        """The Private IP allocation method."""
        return pulumi.get(self, "private_ip_allocation_method")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="publicIPAddress")
    def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']:
        """The reference of the Public IP resource."""
        return pulumi.get(self, "public_ip_address")

    @property
    @pulumi.getter(name="publicIPPrefix")
    def public_ip_prefix(self) -> Optional['outputs.SubResourceResponse']:
        """The reference of the Public IP Prefix resource."""
        return pulumi.get(self, "public_ip_prefix")

    @property
    @pulumi.getter
    def subnet(self) -> Optional['outputs.SubnetResponse']:
        """The reference of the subnet resource."""
        return pulumi.get(self, "subnet")

    @property
    @pulumi.getter
    def zones(self) -> Optional[Sequence[str]]:
        """A list of availability zones denoting the IP allocated for the resource needs to come from."""
        return pulumi.get(self, "zones")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class GatewayRouteResponseResult(dict):
    """Gateway routing details."""

    def __init__(__self__, *,
                 as_path: str,
                 local_address: str,
                 network: str,
                 next_hop: str,
                 origin: str,
                 source_peer: str,
                 weight: int):
        """
        :param str as_path: The route's AS path sequence.
        :param str local_address: The gateway's local address.
        :param str network: The route's network prefix.
        :param str next_hop: The route's next hop.
        :param str origin: The source this route was learned from.
        :param str source_peer: The peer this route was learned from.
        :param int weight: The route's weight.
        """
        # Every field of a gateway route is required; store them all.
        route_fields = (
            ("as_path", as_path),
            ("local_address", local_address),
            ("network", network),
            ("next_hop", next_hop),
            ("origin", origin),
            ("source_peer", source_peer),
            ("weight", weight),
        )
        for attr_name, attr_value in route_fields:
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="asPath")
    def as_path(self) -> str:
        """The route's AS path sequence."""
        return pulumi.get(self, "as_path")

    @property
    @pulumi.getter(name="localAddress")
    def local_address(self) -> str:
        """The gateway's local address."""
        return pulumi.get(self, "local_address")

    @property
    @pulumi.getter
    def network(self) -> str:
        """The route's network prefix."""
        return pulumi.get(self, "network")

    @property
    @pulumi.getter(name="nextHop")
    def next_hop(self) -> str:
        """The route's next hop."""
        return pulumi.get(self, "next_hop")

    @property
    @pulumi.getter
    def origin(self) -> str:
        """The source this route was learned from."""
        return pulumi.get(self, "origin")

    @property
    @pulumi.getter(name="sourcePeer")
    def source_peer(self) -> str:
        """The peer this route was learned from."""
        return pulumi.get(self, "source_peer")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """The route's weight."""
        return pulumi.get(self, "weight")
@pulumi.output_type
class HealthProbeSettingsModelResponse(dict):
    """
    Load balancing settings for a backend pool
    """
    def __init__(__self__, *,
                 resource_state: str,
                 type: str,
                 id: Optional[str] = None,
                 interval_in_seconds: Optional[int] = None,
                 name: Optional[str] = None,
                 path: Optional[str] = None,
                 protocol: Optional[str] = None):
        """
        Load balancing settings for a backend pool
        :param str resource_state: Resource status.
        :param str type: Resource type.
        :param str id: Resource ID.
        :param int interval_in_seconds: The number of seconds between health probes.
        :param str name: Resource name.
        :param str path: The path to use for the health probe. Default is /
        :param str protocol: Protocol scheme to use for this probe
        """
        # Required fields are always stored.
        pulumi.set(__self__, "resource_state", resource_state)
        pulumi.set(__self__, "type", type)
        # Optional fields are stored only when explicitly supplied.
        for key, value in {
            "id": id,
            "interval_in_seconds": interval_in_seconds,
            "name": name,
            "path": path,
            "protocol": protocol,
        }.items():
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="resourceState")
    def resource_state(self) -> str:
        """Resource status."""
        return pulumi.get(self, "resource_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="intervalInSeconds")
    def interval_in_seconds(self) -> Optional[int]:
        """The number of seconds between health probes."""
        return pulumi.get(self, "interval_in_seconds")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Resource name."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def path(self) -> Optional[str]:
        """The path to use for the health probe. Default is /"""
        return pulumi.get(self, "path")
    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """Protocol scheme to use for this probe"""
        return pulumi.get(self, "protocol")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class HubVirtualNetworkConnectionResponse(dict):
    """
    HubVirtualNetworkConnection Resource.
    """
    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 allow_hub_to_remote_vnet_transit: Optional[bool] = None,
                 allow_remote_vnet_to_use_hub_vnet_gateways: Optional[bool] = None,
                 enable_internet_security: Optional[bool] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 remote_virtual_network: Optional['outputs.SubResourceResponse'] = None):
        """
        HubVirtualNetworkConnection Resource.
        :param str etag: Gets a unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: The provisioning state of the resource.
        :param bool allow_hub_to_remote_vnet_transit: VirtualHub to RemoteVnet transit to enabled or not.
        :param bool allow_remote_vnet_to_use_hub_vnet_gateways: Allow RemoteVnet to use Virtual Hub's gateways.
        :param bool enable_internet_security: Enable internet security.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param 'SubResourceResponseArgs' remote_virtual_network: Reference to the remote virtual network.
        """
        # Required fields are always stored.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are stored only when explicitly supplied.
        for key, value in {
            "allow_hub_to_remote_vnet_transit": allow_hub_to_remote_vnet_transit,
            "allow_remote_vnet_to_use_hub_vnet_gateways": allow_remote_vnet_to_use_hub_vnet_gateways,
            "enable_internet_security": enable_internet_security,
            "id": id,
            "name": name,
            "remote_virtual_network": remote_virtual_network,
        }.items():
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter
    def etag(self) -> str:
        """Gets a unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="allowHubToRemoteVnetTransit")
    def allow_hub_to_remote_vnet_transit(self) -> Optional[bool]:
        """VirtualHub to RemoteVnet transit to enabled or not."""
        return pulumi.get(self, "allow_hub_to_remote_vnet_transit")
    @property
    @pulumi.getter(name="allowRemoteVnetToUseHubVnetGateways")
    def allow_remote_vnet_to_use_hub_vnet_gateways(self) -> Optional[bool]:
        """Allow RemoteVnet to use Virtual Hub's gateways."""
        return pulumi.get(self, "allow_remote_vnet_to_use_hub_vnet_gateways")
    @property
    @pulumi.getter(name="enableInternetSecurity")
    def enable_internet_security(self) -> Optional[bool]:
        """Enable internet security."""
        return pulumi.get(self, "enable_internet_security")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="remoteVirtualNetwork")
    def remote_virtual_network(self) -> Optional['outputs.SubResourceResponse']:
        """Reference to the remote virtual network."""
        return pulumi.get(self, "remote_virtual_network")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IPConfigurationProfileResponse(dict):
    """
    IP configuration profile child resource.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 type: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 subnet: Optional['outputs.SubnetResponse'] = None):
        """
        IP configuration profile child resource.
        :param str provisioning_state: The provisioning state of the resource.
        :param str type: Sub Resource type.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource. This name can be used to access the resource.
        :param 'SubnetResponseArgs' subnet: The reference of the subnet resource to create a container network interface ip configuration.
        """
        # Required fields are always stored.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional fields are stored only when explicitly supplied.
        for key, value in {
            "etag": etag,
            "id": id,
            "name": name,
            "subnet": subnet,
        }.items():
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """Sub Resource type."""
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource. This name can be used to access the resource."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def subnet(self) -> Optional['outputs.SubnetResponse']:
        """The reference of the subnet resource to create a container network interface ip configuration."""
        return pulumi.get(self, "subnet")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IPConfigurationResponse(dict):
    """
    IP configuration.
    """
    def __init__(__self__, *,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 private_ip_address: Optional[str] = None,
                 private_ip_allocation_method: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None,
                 subnet: Optional['outputs.SubnetResponse'] = None):
        """
        IP configuration.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str private_ip_address: The private IP address of the IP configuration.
        :param str private_ip_allocation_method: The private IP address allocation method.
        :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param 'PublicIPAddressResponseArgs' public_ip_address: The reference of the public IP resource.
        :param 'SubnetResponseArgs' subnet: The reference of the subnet resource.
        """
        # All fields are optional; only explicitly supplied values are stored.
        for key, value in {
            "etag": etag,
            "id": id,
            "name": name,
            "private_ip_address": private_ip_address,
            "private_ip_allocation_method": private_ip_allocation_method,
            "provisioning_state": provisioning_state,
            "public_ip_address": public_ip_address,
            "subnet": subnet,
        }.items():
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateIPAddress")
    def private_ip_address(self) -> Optional[str]:
        """The private IP address of the IP configuration."""
        return pulumi.get(self, "private_ip_address")
    @property
    @pulumi.getter(name="privateIPAllocationMethod")
    def private_ip_allocation_method(self) -> Optional[str]:
        """The private IP address allocation method."""
        return pulumi.get(self, "private_ip_allocation_method")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicIPAddress")
    def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']:
        """The reference of the public IP resource."""
        return pulumi.get(self, "public_ip_address")
    @property
    @pulumi.getter
    def subnet(self) -> Optional['outputs.SubnetResponse']:
        """The reference of the subnet resource."""
        return pulumi.get(self, "subnet")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InboundNatPoolResponse(dict):
    """
    Inbound NAT pool of the load balancer.
    """
    def __init__(__self__, *,
                 backend_port: int,
                 frontend_port_range_end: int,
                 frontend_port_range_start: int,
                 protocol: str,
                 enable_floating_ip: Optional[bool] = None,
                 enable_tcp_reset: Optional[bool] = None,
                 etag: Optional[str] = None,
                 frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None,
                 id: Optional[str] = None,
                 idle_timeout_in_minutes: Optional[int] = None,
                 name: Optional[str] = None,
                 provisioning_state: Optional[str] = None):
        """
        Inbound NAT pool of the load balancer.
        :param int backend_port: The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535.
        :param int frontend_port_range_end: The last port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65535.
        :param int frontend_port_range_start: The first port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65534.
        :param str protocol: The reference to the transport protocol used by the inbound NAT pool.
        :param bool enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
        :param bool enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param 'SubResourceResponseArgs' frontend_ip_configuration: A reference to frontend IP addresses.
        :param str id: Resource ID.
        :param int idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        # Required fields are always stored.
        pulumi.set(__self__, "backend_port", backend_port)
        pulumi.set(__self__, "frontend_port_range_end", frontend_port_range_end)
        pulumi.set(__self__, "frontend_port_range_start", frontend_port_range_start)
        pulumi.set(__self__, "protocol", protocol)
        # Optional fields are stored only when explicitly supplied.
        for key, value in {
            "enable_floating_ip": enable_floating_ip,
            "enable_tcp_reset": enable_tcp_reset,
            "etag": etag,
            "frontend_ip_configuration": frontend_ip_configuration,
            "id": id,
            "idle_timeout_in_minutes": idle_timeout_in_minutes,
            "name": name,
            "provisioning_state": provisioning_state,
        }.items():
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="backendPort")
    def backend_port(self) -> int:
        """The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535."""
        return pulumi.get(self, "backend_port")
    @property
    @pulumi.getter(name="frontendPortRangeEnd")
    def frontend_port_range_end(self) -> int:
        """The last port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65535."""
        return pulumi.get(self, "frontend_port_range_end")
    @property
    @pulumi.getter(name="frontendPortRangeStart")
    def frontend_port_range_start(self) -> int:
        """The first port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65534."""
        return pulumi.get(self, "frontend_port_range_start")
    @property
    @pulumi.getter
    def protocol(self) -> str:
        """The reference to the transport protocol used by the inbound NAT pool."""
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="enableFloatingIP")
    def enable_floating_ip(self) -> Optional[bool]:
        """Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint."""
        return pulumi.get(self, "enable_floating_ip")
    @property
    @pulumi.getter(name="enableTcpReset")
    def enable_tcp_reset(self) -> Optional[bool]:
        """Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP."""
        return pulumi.get(self, "enable_tcp_reset")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="frontendIPConfiguration")
    def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']:
        """A reference to frontend IP addresses."""
        return pulumi.get(self, "frontend_ip_configuration")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[int]:
        """The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP."""
        return pulumi.get(self, "idle_timeout_in_minutes")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InboundNatRuleResponse(dict):
    """
    Inbound NAT rule of the load balancer.
    """
    def __init__(__self__, *,
                 backend_ip_configuration: 'outputs.NetworkInterfaceIPConfigurationResponse',
                 backend_port: Optional[int] = None,
                 enable_floating_ip: Optional[bool] = None,
                 enable_tcp_reset: Optional[bool] = None,
                 etag: Optional[str] = None,
                 frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None,
                 frontend_port: Optional[int] = None,
                 id: Optional[str] = None,
                 idle_timeout_in_minutes: Optional[int] = None,
                 name: Optional[str] = None,
                 protocol: Optional[str] = None,
                 provisioning_state: Optional[str] = None):
        """
        Inbound NAT rule of the load balancer.
        :param 'NetworkInterfaceIPConfigurationResponseArgs' backend_ip_configuration: A reference to a private IP address defined on a network interface of a VM. Traffic sent to the frontend port of each of the frontend IP configurations is forwarded to the backend IP.
        :param int backend_port: The port used for the internal endpoint. Acceptable values range from 1 to 65535.
        :param bool enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
        :param bool enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param 'SubResourceResponseArgs' frontend_ip_configuration: A reference to frontend IP addresses.
        :param int frontend_port: The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values range from 1 to 65534.
        :param str id: Resource ID.
        :param int idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str protocol: The reference to the transport protocol used by the load balancing rule.
        :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        # The backend IP configuration is the only required field.
        pulumi.set(__self__, "backend_ip_configuration", backend_ip_configuration)
        # Optional fields are stored only when explicitly supplied.
        for key, value in {
            "backend_port": backend_port,
            "enable_floating_ip": enable_floating_ip,
            "enable_tcp_reset": enable_tcp_reset,
            "etag": etag,
            "frontend_ip_configuration": frontend_ip_configuration,
            "frontend_port": frontend_port,
            "id": id,
            "idle_timeout_in_minutes": idle_timeout_in_minutes,
            "name": name,
            "protocol": protocol,
            "provisioning_state": provisioning_state,
        }.items():
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="backendIPConfiguration")
    def backend_ip_configuration(self) -> 'outputs.NetworkInterfaceIPConfigurationResponse':
        """A reference to a private IP address defined on a network interface of a VM. Traffic sent to the frontend port of each of the frontend IP configurations is forwarded to the backend IP."""
        return pulumi.get(self, "backend_ip_configuration")
    @property
    @pulumi.getter(name="backendPort")
    def backend_port(self) -> Optional[int]:
        """The port used for the internal endpoint. Acceptable values range from 1 to 65535."""
        return pulumi.get(self, "backend_port")
    @property
    @pulumi.getter(name="enableFloatingIP")
    def enable_floating_ip(self) -> Optional[bool]:
        """Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint."""
        return pulumi.get(self, "enable_floating_ip")
    @property
    @pulumi.getter(name="enableTcpReset")
    def enable_tcp_reset(self) -> Optional[bool]:
        """Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP."""
        return pulumi.get(self, "enable_tcp_reset")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="frontendIPConfiguration")
    def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']:
        """A reference to frontend IP addresses."""
        return pulumi.get(self, "frontend_ip_configuration")
    @property
    @pulumi.getter(name="frontendPort")
    def frontend_port(self) -> Optional[int]:
        """The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values range from 1 to 65534."""
        return pulumi.get(self, "frontend_port")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[int]:
        """The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP."""
        return pulumi.get(self, "idle_timeout_in_minutes")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Gets name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """The reference to the transport protocol used by the load balancing rule."""
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IpTagResponse(dict):
    """
    Contains the IpTag associated with the object.
    """
    def __init__(__self__, *,
                 ip_tag_type: Optional[str] = None,
                 tag: Optional[str] = None):
        """
        Contains the IpTag associated with the object.
        :param str ip_tag_type: Gets or sets the ipTag type: Example FirstPartyUsage.
        :param str tag: Gets or sets value of the IpTag associated with the public IP. Example SQL, Storage etc.
        """
        # Both fields are optional; only explicitly supplied values are stored.
        for key, value in {"ip_tag_type": ip_tag_type, "tag": tag}.items():
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="ipTagType")
    def ip_tag_type(self) -> Optional[str]:
        """Gets or sets the ipTag type: Example FirstPartyUsage."""
        return pulumi.get(self, "ip_tag_type")
    @property
    @pulumi.getter
    def tag(self) -> Optional[str]:
        """Gets or sets value of the IpTag associated with the public IP. Example SQL, Storage etc."""
        return pulumi.get(self, "tag")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IpsecPolicyResponse(dict):
    """
    An IPSec Policy configuration for a virtual network gateway connection.
    """
    def __init__(__self__, *,
                 dh_group: str,
                 ike_encryption: str,
                 ike_integrity: str,
                 ipsec_encryption: str,
                 ipsec_integrity: str,
                 pfs_group: str,
                 sa_data_size_kilobytes: int,
                 sa_life_time_seconds: int):
        """
        An IPSec Policy configuration for a virtual network gateway connection.
        :param str dh_group: The DH Group used in IKE Phase 1 for initial SA.
        :param str ike_encryption: The IKE encryption algorithm (IKE phase 2).
        :param str ike_integrity: The IKE integrity algorithm (IKE phase 2).
        :param str ipsec_encryption: The IPSec encryption algorithm (IKE phase 1).
        :param str ipsec_integrity: The IPSec integrity algorithm (IKE phase 1).
        :param str pfs_group: The Pfs Group used in IKE Phase 2 for new child SA.
        :param int sa_data_size_kilobytes: The IPSec Security Association (also called Quick Mode or Phase 2 SA) payload size in KB for a site to site VPN tunnel.
        :param int sa_life_time_seconds: The IPSec Security Association (also called Quick Mode or Phase 2 SA) lifetime in seconds for a site to site VPN tunnel.
        """
        # Every field of an IPSec policy is required, so store them all.
        for key, value in {
            "dh_group": dh_group,
            "ike_encryption": ike_encryption,
            "ike_integrity": ike_integrity,
            "ipsec_encryption": ipsec_encryption,
            "ipsec_integrity": ipsec_integrity,
            "pfs_group": pfs_group,
            "sa_data_size_kilobytes": sa_data_size_kilobytes,
            "sa_life_time_seconds": sa_life_time_seconds,
        }.items():
            pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="dhGroup")
    def dh_group(self) -> str:
        """The DH Group used in IKE Phase 1 for initial SA."""
        return pulumi.get(self, "dh_group")
    @property
    @pulumi.getter(name="ikeEncryption")
    def ike_encryption(self) -> str:
        """The IKE encryption algorithm (IKE phase 2)."""
        return pulumi.get(self, "ike_encryption")
    @property
    @pulumi.getter(name="ikeIntegrity")
    def ike_integrity(self) -> str:
        """The IKE integrity algorithm (IKE phase 2)."""
        return pulumi.get(self, "ike_integrity")
    @property
    @pulumi.getter(name="ipsecEncryption")
    def ipsec_encryption(self) -> str:
        """The IPSec encryption algorithm (IKE phase 1)."""
        return pulumi.get(self, "ipsec_encryption")
    @property
    @pulumi.getter(name="ipsecIntegrity")
    def ipsec_integrity(self) -> str:
        """The IPSec integrity algorithm (IKE phase 1)."""
        return pulumi.get(self, "ipsec_integrity")
    @property
    @pulumi.getter(name="pfsGroup")
    def pfs_group(self) -> str:
        """The Pfs Group used in IKE Phase 2 for new child SA."""
        return pulumi.get(self, "pfs_group")
    @property
    @pulumi.getter(name="saDataSizeKilobytes")
    def sa_data_size_kilobytes(self) -> int:
        """The IPSec Security Association (also called Quick Mode or Phase 2 SA) payload size in KB for a site to site VPN tunnel."""
        return pulumi.get(self, "sa_data_size_kilobytes")
    @property
    @pulumi.getter(name="saLifeTimeSeconds")
    def sa_life_time_seconds(self) -> int:
        """The IPSec Security Association (also called Quick Mode or Phase 2 SA) lifetime in seconds for a site to site VPN tunnel."""
        return pulumi.get(self, "sa_life_time_seconds")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class Ipv6ExpressRouteCircuitPeeringConfigResponse(dict):
    """
    Contains IPv6 peering config.
    """
    def __init__(__self__, *,
                 microsoft_peering_config: Optional['outputs.ExpressRouteCircuitPeeringConfigResponse'] = None,
                 primary_peer_address_prefix: Optional[str] = None,
                 route_filter: Optional['outputs.SubResourceResponse'] = None,
                 secondary_peer_address_prefix: Optional[str] = None,
                 state: Optional[str] = None):
        """
        Contains IPv6 peering config.
        :param 'ExpressRouteCircuitPeeringConfigResponseArgs' microsoft_peering_config: The Microsoft peering configuration.
        :param str primary_peer_address_prefix: The primary address prefix.
        :param 'SubResourceResponseArgs' route_filter: The reference of the RouteFilter resource.
        :param str secondary_peer_address_prefix: The secondary address prefix.
        :param str state: The state of peering.
        """
        # All fields are optional; only explicitly supplied values are stored.
        for key, value in {
            "microsoft_peering_config": microsoft_peering_config,
            "primary_peer_address_prefix": primary_peer_address_prefix,
            "route_filter": route_filter,
            "secondary_peer_address_prefix": secondary_peer_address_prefix,
            "state": state,
        }.items():
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="microsoftPeeringConfig")
    def microsoft_peering_config(self) -> Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']:
        """The Microsoft peering configuration."""
        return pulumi.get(self, "microsoft_peering_config")
    @property
    @pulumi.getter(name="primaryPeerAddressPrefix")
    def primary_peer_address_prefix(self) -> Optional[str]:
        """The primary address prefix."""
        return pulumi.get(self, "primary_peer_address_prefix")
    @property
    @pulumi.getter(name="routeFilter")
    def route_filter(self) -> Optional['outputs.SubResourceResponse']:
        """The reference of the RouteFilter resource."""
        return pulumi.get(self, "route_filter")
    @property
    @pulumi.getter(name="secondaryPeerAddressPrefix")
    def secondary_peer_address_prefix(self) -> Optional[str]:
        """The secondary address prefix."""
        return pulumi.get(self, "secondary_peer_address_prefix")
    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        """The state of peering."""
        return pulumi.get(self, "state")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class KeyVaultCertificateSourceParametersResponseVault(dict):
    """
    The Key Vault containing the SSL certificate
    """
    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        The Key Vault containing the SSL certificate
        :param str id: Resource ID.
        """
        # The resource ID is optional; store it only when supplied.
        if id is None:
            return
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class LoadBalancerSkuResponse(dict):
    """Describes the SKU of a load balancer."""
    def __init__(__self__, *, name: Optional[str] = None):
        """
        :param str name: Name of a load balancer SKU.
        """
        # Only record the SKU name when the caller provided one.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of a load balancer SKU."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map the camelCase wire name to snake_case where a mapping exists.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class LoadBalancingRuleResponse(dict):
    """
    A load balancing rule for a load balancer.
    """
    def __init__(__self__, *,
                 frontend_port: int,
                 protocol: str,
                 backend_address_pool: Optional['outputs.SubResourceResponse'] = None,
                 backend_port: Optional[int] = None,
                 disable_outbound_snat: Optional[bool] = None,
                 enable_floating_ip: Optional[bool] = None,
                 enable_tcp_reset: Optional[bool] = None,
                 etag: Optional[str] = None,
                 frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None,
                 id: Optional[str] = None,
                 idle_timeout_in_minutes: Optional[int] = None,
                 load_distribution: Optional[str] = None,
                 name: Optional[str] = None,
                 probe: Optional['outputs.SubResourceResponse'] = None,
                 provisioning_state: Optional[str] = None):
        """
        A load balancing rule for a load balancer.
        :param int frontend_port: The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values are between 0 and 65534. Note that value 0 enables "Any Port".
        :param str protocol: The reference to the transport protocol used by the load balancing rule.
        :param 'SubResourceResponseArgs' backend_address_pool: A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs.
        :param int backend_port: The port used for internal connections on the endpoint. Acceptable values are between 0 and 65535. Note that value 0 enables "Any Port".
        :param bool disable_outbound_snat: Configures SNAT for the VMs in the backend pool to use the publicIP address specified in the frontend of the load balancing rule.
        :param bool enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
        :param bool enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param 'SubResourceResponseArgs' frontend_ip_configuration: A reference to frontend IP addresses.
        :param str id: Resource ID.
        :param int idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        :param str load_distribution: The load distribution policy for this rule.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param 'SubResourceResponseArgs' probe: The reference of the load balancer probe used by the load balancing rule.
        :param str provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        # The two required fields are always stored; every optional field is
        # stored only when a value was supplied, so absent keys stay out of
        # the underlying dict.
        pulumi.set(__self__, "frontend_port", frontend_port)
        pulumi.set(__self__, "protocol", protocol)
        if backend_address_pool is not None:
            pulumi.set(__self__, "backend_address_pool", backend_address_pool)
        if backend_port is not None:
            pulumi.set(__self__, "backend_port", backend_port)
        if disable_outbound_snat is not None:
            pulumi.set(__self__, "disable_outbound_snat", disable_outbound_snat)
        if enable_floating_ip is not None:
            pulumi.set(__self__, "enable_floating_ip", enable_floating_ip)
        if enable_tcp_reset is not None:
            pulumi.set(__self__, "enable_tcp_reset", enable_tcp_reset)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if frontend_ip_configuration is not None:
            pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if idle_timeout_in_minutes is not None:
            pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
        if load_distribution is not None:
            pulumi.set(__self__, "load_distribution", load_distribution)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if probe is not None:
            pulumi.set(__self__, "probe", probe)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
    @property
    @pulumi.getter(name="frontendPort")
    def frontend_port(self) -> int:
        """
        The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values are between 0 and 65534. Note that value 0 enables "Any Port".
        """
        return pulumi.get(self, "frontend_port")
    @property
    @pulumi.getter
    def protocol(self) -> str:
        """
        The reference to the transport protocol used by the load balancing rule.
        """
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="backendAddressPool")
    def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']:
        """
        A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs.
        """
        return pulumi.get(self, "backend_address_pool")
    @property
    @pulumi.getter(name="backendPort")
    def backend_port(self) -> Optional[int]:
        """
        The port used for internal connections on the endpoint. Acceptable values are between 0 and 65535. Note that value 0 enables "Any Port".
        """
        return pulumi.get(self, "backend_port")
    @property
    @pulumi.getter(name="disableOutboundSnat")
    def disable_outbound_snat(self) -> Optional[bool]:
        """
        Configures SNAT for the VMs in the backend pool to use the publicIP address specified in the frontend of the load balancing rule.
        """
        return pulumi.get(self, "disable_outbound_snat")
    @property
    @pulumi.getter(name="enableFloatingIP")
    def enable_floating_ip(self) -> Optional[bool]:
        """
        Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
        """
        return pulumi.get(self, "enable_floating_ip")
    @property
    @pulumi.getter(name="enableTcpReset")
    def enable_tcp_reset(self) -> Optional[bool]:
        """
        Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
        """
        return pulumi.get(self, "enable_tcp_reset")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="frontendIPConfiguration")
    def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']:
        """
        A reference to frontend IP addresses.
        """
        return pulumi.get(self, "frontend_ip_configuration")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[int]:
        """
        The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        """
        return pulumi.get(self, "idle_timeout_in_minutes")
    @property
    @pulumi.getter(name="loadDistribution")
    def load_distribution(self) -> Optional[str]:
        """
        The load distribution policy for this rule.
        """
        return pulumi.get(self, "load_distribution")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def probe(self) -> Optional['outputs.SubResourceResponse']:
        """
        The reference of the load balancer probe used by the load balancing rule.
        """
        return pulumi.get(self, "probe")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    def _translate_property(self, prop):
        # Translate a camelCase wire name to its snake_case attribute where a
        # mapping exists; otherwise return the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class LoadBalancingSettingsModelResponse(dict):
    """
    Load balancing settings for a backend pool
    """
    def __init__(__self__, *,
                 resource_state: str,
                 type: str,
                 additional_latency_milliseconds: Optional[int] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 sample_size: Optional[int] = None,
                 successful_samples_required: Optional[int] = None):
        """
        Load balancing settings for a backend pool
        :param str resource_state: Resource status.
        :param str type: Resource type.
        :param int additional_latency_milliseconds: The additional latency in milliseconds for probes to fall into the lowest latency bucket
        :param str id: Resource ID.
        :param str name: Resource name.
        :param int sample_size: The number of samples to consider for load balancing decisions
        :param int successful_samples_required: The number of samples within the sample period that must succeed
        """
        # Required fields are always stored; optional fields are stored only
        # when provided, so absent keys stay out of the underlying dict.
        pulumi.set(__self__, "resource_state", resource_state)
        pulumi.set(__self__, "type", type)
        if additional_latency_milliseconds is not None:
            pulumi.set(__self__, "additional_latency_milliseconds", additional_latency_milliseconds)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if sample_size is not None:
            pulumi.set(__self__, "sample_size", sample_size)
        if successful_samples_required is not None:
            pulumi.set(__self__, "successful_samples_required", successful_samples_required)
    @property
    @pulumi.getter(name="resourceState")
    def resource_state(self) -> str:
        """
        Resource status.
        """
        return pulumi.get(self, "resource_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="additionalLatencyMilliseconds")
    def additional_latency_milliseconds(self) -> Optional[int]:
        """
        The additional latency in milliseconds for probes to fall into the lowest latency bucket
        """
        return pulumi.get(self, "additional_latency_milliseconds")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="sampleSize")
    def sample_size(self) -> Optional[int]:
        """
        The number of samples to consider for load balancing decisions
        """
        return pulumi.get(self, "sample_size")
    @property
    @pulumi.getter(name="successfulSamplesRequired")
    def successful_samples_required(self) -> Optional[int]:
        """
        The number of samples within the sample period that must succeed
        """
        return pulumi.get(self, "successful_samples_required")
    def _translate_property(self, prop):
        # Translate a camelCase wire name to snake_case where a mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class LocalNetworkGatewayResponse(dict):
    """
    A common class for general resource information.
    """
    def __init__(__self__, *,
                 name: str,
                 provisioning_state: str,
                 type: str,
                 bgp_settings: Optional['outputs.BgpSettingsResponse'] = None,
                 etag: Optional[str] = None,
                 gateway_ip_address: Optional[str] = None,
                 id: Optional[str] = None,
                 local_network_address_space: Optional['outputs.AddressSpaceResponse'] = None,
                 location: Optional[str] = None,
                 resource_guid: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        A common class for general resource information.
        :param str name: Resource name.
        :param str provisioning_state: The provisioning state of the LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str type: Resource type.
        :param 'BgpSettingsResponseArgs' bgp_settings: Local network gateway's BGP speaker settings.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str gateway_ip_address: IP address of local network gateway.
        :param str id: Resource ID.
        :param 'AddressSpaceResponseArgs' local_network_address_space: Local network site address space.
        :param str location: Resource location.
        :param str resource_guid: The resource GUID property of the LocalNetworkGateway resource.
        :param Mapping[str, str] tags: Resource tags.
        """
        # Required fields are always stored; optional fields are stored only
        # when provided, so absent keys stay out of the underlying dict.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        if bgp_settings is not None:
            pulumi.set(__self__, "bgp_settings", bgp_settings)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if gateway_ip_address is not None:
            pulumi.set(__self__, "gateway_ip_address", gateway_ip_address)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if local_network_address_space is not None:
            pulumi.set(__self__, "local_network_address_space", local_network_address_space)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if resource_guid is not None:
            pulumi.set(__self__, "resource_guid", resource_guid)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="bgpSettings")
    def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']:
        """
        Local network gateway's BGP speaker settings.
        """
        return pulumi.get(self, "bgp_settings")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="gatewayIpAddress")
    def gateway_ip_address(self) -> Optional[str]:
        """
        IP address of local network gateway.
        """
        return pulumi.get(self, "gateway_ip_address")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="localNetworkAddressSpace")
    def local_network_address_space(self) -> Optional['outputs.AddressSpaceResponse']:
        """
        Local network site address space.
        """
        return pulumi.get(self, "local_network_address_space")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """
        The resource GUID property of the LocalNetworkGateway resource.
        """
        return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    def _translate_property(self, prop):
        # Translate a camelCase wire name to snake_case where a mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedServiceIdentityResponse(dict):
    """
    Identity for the resource.
    """
    def __init__(__self__, *,
                 principal_id: str,
                 tenant_id: str,
                 type: Optional[str] = None,
                 user_assigned_identities: Optional[Mapping[str, 'outputs.ManagedServiceIdentityResponseUserAssignedIdentities']] = None):
        """
        Identity for the resource.
        :param str principal_id: The principal id of the system assigned identity. This property will only be provided for a system assigned identity.
        :param str tenant_id: The tenant id of the system assigned identity. This property will only be provided for a system assigned identity.
        :param str type: The type of identity used for the resource. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine.
        :param Mapping[str, 'ManagedServiceIdentityResponseUserAssignedIdentitiesArgs'] user_assigned_identities: The list of user identities associated with resource. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        # Required fields are always stored; optional fields are stored only
        # when provided, so absent keys stay out of the underlying dict.
        pulumi.set(__self__, "principal_id", principal_id)
        pulumi.set(__self__, "tenant_id", tenant_id)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if user_assigned_identities is not None:
            pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The principal id of the system assigned identity. This property will only be provided for a system assigned identity.
        """
        return pulumi.get(self, "principal_id")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The tenant id of the system assigned identity. This property will only be provided for a system assigned identity.
        """
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        The type of identity used for the resource. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="userAssignedIdentities")
    def user_assigned_identities(self) -> Optional[Mapping[str, 'outputs.ManagedServiceIdentityResponseUserAssignedIdentities']]:
        """
        The list of user identities associated with resource. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        return pulumi.get(self, "user_assigned_identities")
    def _translate_property(self, prop):
        # Translate a camelCase wire name to snake_case where a mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedServiceIdentityResponseUserAssignedIdentities(dict):
    """A single user-assigned identity entry (client id plus principal id)."""
    def __init__(__self__, *, client_id: str, principal_id: str):
        """
        :param str client_id: The client id of user assigned identity.
        :param str principal_id: The principal id of user assigned identity.
        """
        # Both fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "principal_id", principal_id)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        """The client id of the user-assigned identity."""
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """The principal id of the user-assigned identity."""
        return pulumi.get(self, "principal_id")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class MatchConditionResponse(dict):
    """
    Define match conditions.
    """
    def __init__(__self__, *,
                 match_values: Sequence[str],
                 match_variables: Sequence['outputs.MatchVariableResponse'],
                 operator: str,
                 negation_conditon: Optional[bool] = None,
                 transforms: Optional[Sequence[str]] = None):
        """
        Define match conditions.
        :param Sequence[str] match_values: Match value.
        :param Sequence['MatchVariableResponseArgs'] match_variables: List of match variables.
        :param str operator: Describes operator to be matched.
        :param bool negation_conditon: Describes if this is negate condition or not.
        :param Sequence[str] transforms: List of transforms.
        """
        # NOTE(review): "negation_conditon" / "negationConditon" looks like an
        # upstream Azure API misspelling carried through code generation; it is
        # kept verbatim so the wire name still matches — do not "fix" the
        # spelling here without confirming against the Azure REST spec.
        pulumi.set(__self__, "match_values", match_values)
        pulumi.set(__self__, "match_variables", match_variables)
        pulumi.set(__self__, "operator", operator)
        if negation_conditon is not None:
            pulumi.set(__self__, "negation_conditon", negation_conditon)
        if transforms is not None:
            pulumi.set(__self__, "transforms", transforms)
    @property
    @pulumi.getter(name="matchValues")
    def match_values(self) -> Sequence[str]:
        """
        Match value.
        """
        return pulumi.get(self, "match_values")
    @property
    @pulumi.getter(name="matchVariables")
    def match_variables(self) -> Sequence['outputs.MatchVariableResponse']:
        """
        List of match variables.
        """
        return pulumi.get(self, "match_variables")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Describes operator to be matched.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter(name="negationConditon")
    def negation_conditon(self) -> Optional[bool]:
        """
        Describes if this is negate condition or not.
        """
        return pulumi.get(self, "negation_conditon")
    @property
    @pulumi.getter
    def transforms(self) -> Optional[Sequence[str]]:
        """
        List of transforms.
        """
        return pulumi.get(self, "transforms")
    def _translate_property(self, prop):
        # Translate a camelCase wire name to snake_case where a mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class MatchVariableResponse(dict):
    """A single match variable, optionally narrowed by a selector."""
    def __init__(__self__, *, variable_name: str, selector: Optional[str] = None):
        """
        :param str variable_name: Match Variable.
        :param str selector: Describes field of the matchVariable collection.
        """
        pulumi.set(__self__, "variable_name", variable_name)
        # The selector is optional; store it only when one was supplied.
        if selector is not None:
            pulumi.set(__self__, "selector", selector)

    @property
    @pulumi.getter(name="variableName")
    def variable_name(self) -> str:
        """The match variable."""
        return pulumi.get(self, "variable_name")

    @property
    @pulumi.getter
    def selector(self) -> Optional[str]:
        """Field of the matchVariable collection this variable refers to."""
        return pulumi.get(self, "selector")

    def _translate_property(self, prop):
        # Prefer the snake_case mapping; fall back to the wire name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class NatGatewaySkuResponse(dict):
    """Describes the SKU of a NAT gateway."""
    def __init__(__self__, *, name: Optional[str] = None):
        """
        :param str name: Name of Nat Gateway SKU.
        """
        # Only record the SKU name when the caller provided one.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the Nat Gateway SKU."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map the camelCase wire name to snake_case where a mapping exists.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class NetworkInterfaceDnsSettingsResponse(dict):
    """
    DNS settings of a network interface.
    """
    def __init__(__self__, *,
                 applied_dns_servers: Optional[Sequence[str]] = None,
                 dns_servers: Optional[Sequence[str]] = None,
                 internal_dns_name_label: Optional[str] = None,
                 internal_domain_name_suffix: Optional[str] = None,
                 internal_fqdn: Optional[str] = None):
        """
        DNS settings of a network interface.
        :param Sequence[str] applied_dns_servers: If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set. This property is what is configured on each of those VMs.
        :param Sequence[str] dns_servers: List of DNS servers IP addresses. Use 'AzureProvidedDNS' to switch to azure provided DNS resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in dnsServers collection.
        :param str internal_dns_name_label: Relative DNS name for this NIC used for internal communications between VMs in the same virtual network.
        :param str internal_domain_name_suffix: Even if internalDnsNameLabel is not specified, a DNS entry is created for the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of internalDomainNameSuffix.
        :param str internal_fqdn: Fully qualified DNS name supporting internal communications between VMs in the same virtual network.
        """
        # All fields are optional; each is stored only when provided, so
        # absent keys stay out of the underlying dict.
        if applied_dns_servers is not None:
            pulumi.set(__self__, "applied_dns_servers", applied_dns_servers)
        if dns_servers is not None:
            pulumi.set(__self__, "dns_servers", dns_servers)
        if internal_dns_name_label is not None:
            pulumi.set(__self__, "internal_dns_name_label", internal_dns_name_label)
        if internal_domain_name_suffix is not None:
            pulumi.set(__self__, "internal_domain_name_suffix", internal_domain_name_suffix)
        if internal_fqdn is not None:
            pulumi.set(__self__, "internal_fqdn", internal_fqdn)
    @property
    @pulumi.getter(name="appliedDnsServers")
    def applied_dns_servers(self) -> Optional[Sequence[str]]:
        """
        If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set. This property is what is configured on each of those VMs.
        """
        return pulumi.get(self, "applied_dns_servers")
    @property
    @pulumi.getter(name="dnsServers")
    def dns_servers(self) -> Optional[Sequence[str]]:
        """
        List of DNS servers IP addresses. Use 'AzureProvidedDNS' to switch to azure provided DNS resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in dnsServers collection.
        """
        return pulumi.get(self, "dns_servers")
    @property
    @pulumi.getter(name="internalDnsNameLabel")
    def internal_dns_name_label(self) -> Optional[str]:
        """
        Relative DNS name for this NIC used for internal communications between VMs in the same virtual network.
        """
        return pulumi.get(self, "internal_dns_name_label")
    @property
    @pulumi.getter(name="internalDomainNameSuffix")
    def internal_domain_name_suffix(self) -> Optional[str]:
        """
        Even if internalDnsNameLabel is not specified, a DNS entry is created for the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of internalDomainNameSuffix.
        """
        return pulumi.get(self, "internal_domain_name_suffix")
    @property
    @pulumi.getter(name="internalFqdn")
    def internal_fqdn(self) -> Optional[str]:
        """
        Fully qualified DNS name supporting internal communications between VMs in the same virtual network.
        """
        return pulumi.get(self, "internal_fqdn")
    def _translate_property(self, prop):
        # Translate a camelCase wire name to snake_case where a mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NetworkInterfaceIPConfigurationResponse(dict):
    """
    IPConfiguration in a network interface.
    """
    def __init__(__self__, *,
                 application_gateway_backend_address_pools: Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']] = None,
                 application_security_groups: Optional[Sequence['outputs.ApplicationSecurityGroupResponse']] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 load_balancer_backend_address_pools: Optional[Sequence['outputs.BackendAddressPoolResponse']] = None,
                 load_balancer_inbound_nat_rules: Optional[Sequence['outputs.InboundNatRuleResponse']] = None,
                 name: Optional[str] = None,
                 primary: Optional[bool] = None,
                 private_ip_address: Optional[str] = None,
                 private_ip_address_version: Optional[str] = None,
                 private_ip_allocation_method: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None,
                 subnet: Optional['outputs.SubnetResponse'] = None,
                 virtual_network_taps: Optional[Sequence['outputs.VirtualNetworkTapResponse']] = None):
        """
        IPConfiguration in a network interface.
        :param Sequence['ApplicationGatewayBackendAddressPoolResponseArgs'] application_gateway_backend_address_pools: The reference of ApplicationGatewayBackendAddressPool resource.
        :param Sequence['ApplicationSecurityGroupResponseArgs'] application_security_groups: Application security groups in which the IP configuration is included.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param Sequence['BackendAddressPoolResponseArgs'] load_balancer_backend_address_pools: The reference of LoadBalancerBackendAddressPool resource.
        :param Sequence['InboundNatRuleResponseArgs'] load_balancer_inbound_nat_rules: A list of references of LoadBalancerInboundNatRules.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param bool primary: Gets whether this is a primary customer address on the network interface.
        :param str private_ip_address: Private IP address of the IP configuration.
        :param str private_ip_address_version: Available from Api-Version 2016-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4.
        :param str private_ip_allocation_method: The private IP address allocation method.
        :param str provisioning_state: The provisioning state of the network interface IP configuration. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param 'PublicIPAddressResponseArgs' public_ip_address: Public IP address bound to the IP configuration.
        :param 'SubnetResponseArgs' subnet: Subnet bound to the IP configuration.
        :param Sequence['VirtualNetworkTapResponseArgs'] virtual_network_taps: The reference to Virtual Network Taps.
        """
        # All fields are optional; each is stored only when provided, so
        # absent keys stay out of the underlying dict.
        if application_gateway_backend_address_pools is not None:
            pulumi.set(__self__, "application_gateway_backend_address_pools", application_gateway_backend_address_pools)
        if application_security_groups is not None:
            pulumi.set(__self__, "application_security_groups", application_security_groups)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if load_balancer_backend_address_pools is not None:
            pulumi.set(__self__, "load_balancer_backend_address_pools", load_balancer_backend_address_pools)
        if load_balancer_inbound_nat_rules is not None:
            pulumi.set(__self__, "load_balancer_inbound_nat_rules", load_balancer_inbound_nat_rules)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if primary is not None:
            pulumi.set(__self__, "primary", primary)
        if private_ip_address is not None:
            pulumi.set(__self__, "private_ip_address", private_ip_address)
        if private_ip_address_version is not None:
            pulumi.set(__self__, "private_ip_address_version", private_ip_address_version)
        if private_ip_allocation_method is not None:
            pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if public_ip_address is not None:
            pulumi.set(__self__, "public_ip_address", public_ip_address)
        if subnet is not None:
            pulumi.set(__self__, "subnet", subnet)
        if virtual_network_taps is not None:
            pulumi.set(__self__, "virtual_network_taps", virtual_network_taps)
    @property
    @pulumi.getter(name="applicationGatewayBackendAddressPools")
    def application_gateway_backend_address_pools(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]:
        """
        The reference of ApplicationGatewayBackendAddressPool resource.
        """
        return pulumi.get(self, "application_gateway_backend_address_pools")
    @property
    @pulumi.getter(name="applicationSecurityGroups")
    def application_security_groups(self) -> Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]:
        """
        Application security groups in which the IP configuration is included.
        """
        return pulumi.get(self, "application_security_groups")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="loadBalancerBackendAddressPools")
    def load_balancer_backend_address_pools(self) -> Optional[Sequence['outputs.BackendAddressPoolResponse']]:
        """
        The reference of LoadBalancerBackendAddressPool resource.
        """
        return pulumi.get(self, "load_balancer_backend_address_pools")
    @property
    @pulumi.getter(name="loadBalancerInboundNatRules")
    def load_balancer_inbound_nat_rules(self) -> Optional[Sequence['outputs.InboundNatRuleResponse']]:
        """
        A list of references of LoadBalancerInboundNatRules.
        """
        return pulumi.get(self, "load_balancer_inbound_nat_rules")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def primary(self) -> Optional[bool]:
        """
        Gets whether this is a primary customer address on the network interface.
        """
        return pulumi.get(self, "primary")
    # NOTE: the wire names below use Azure's "IP" capitalization
    # (e.g. "privateIPAddress"), which the snake_case attribute names hide.
    @property
    @pulumi.getter(name="privateIPAddress")
    def private_ip_address(self) -> Optional[str]:
        """
        Private IP address of the IP configuration.
        """
        return pulumi.get(self, "private_ip_address")
    @property
    @pulumi.getter(name="privateIPAddressVersion")
    def private_ip_address_version(self) -> Optional[str]:
        """
        Available from Api-Version 2016-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4.
        """
        return pulumi.get(self, "private_ip_address_version")
    @property
    @pulumi.getter(name="privateIPAllocationMethod")
    def private_ip_allocation_method(self) -> Optional[str]:
        """
        The private IP address allocation method.
        """
        return pulumi.get(self, "private_ip_allocation_method")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        The provisioning state of the network interface IP configuration. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicIPAddress")
    def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']:
        """
        Public IP address bound to the IP configuration.
        """
        return pulumi.get(self, "public_ip_address")
    @property
    @pulumi.getter
    def subnet(self) -> Optional['outputs.SubnetResponse']:
        """
        Subnet bound to the IP configuration.
        """
        return pulumi.get(self, "subnet")
    @property
    @pulumi.getter(name="virtualNetworkTaps")
    def virtual_network_taps(self) -> Optional[Sequence['outputs.VirtualNetworkTapResponse']]:
        """
        The reference to Virtual Network Taps.
        """
        return pulumi.get(self, "virtual_network_taps")
    def _translate_property(self, prop):
        # Translate a camelCase wire name to snake_case where a mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NetworkInterfaceResponse(dict):
    """
    A network interface in a resource group.
    """
    def __init__(__self__, *,
                 hosted_workloads: Sequence[str],
                 name: str,
                 private_endpoint: 'outputs.PrivateEndpointResponse',
                 type: str,
                 virtual_machine: 'outputs.SubResourceResponse',
                 dns_settings: Optional['outputs.NetworkInterfaceDnsSettingsResponse'] = None,
                 enable_accelerated_networking: Optional[bool] = None,
                 enable_ip_forwarding: Optional[bool] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None,
                 location: Optional[str] = None,
                 mac_address: Optional[str] = None,
                 network_security_group: Optional['outputs.NetworkSecurityGroupResponse'] = None,
                 primary: Optional[bool] = None,
                 provisioning_state: Optional[str] = None,
                 resource_guid: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 tap_configurations: Optional[Sequence['outputs.NetworkInterfaceTapConfigurationResponse']] = None):
        """
        A network interface in a resource group.
        :param Sequence[str] hosted_workloads: A list of references to linked BareMetal resources.
        :param str name: Resource name.
        :param 'PrivateEndpointResponseArgs' private_endpoint: A reference to the private endpoint to which the network interface is linked.
        :param str type: Resource type.
        :param 'SubResourceResponseArgs' virtual_machine: The reference of a virtual machine.
        :param 'NetworkInterfaceDnsSettingsResponseArgs' dns_settings: The DNS settings in network interface.
        :param bool enable_accelerated_networking: If the network interface is accelerated networking enabled.
        :param bool enable_ip_forwarding: Indicates whether IP forwarding is enabled on this network interface.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param Sequence['NetworkInterfaceIPConfigurationResponseArgs'] ip_configurations: A list of IPConfigurations of the network interface.
        :param str location: Resource location.
        :param str mac_address: The MAC address of the network interface.
        :param 'NetworkSecurityGroupResponseArgs' network_security_group: The reference of the NetworkSecurityGroup resource.
        :param bool primary: Gets whether this is a primary network interface on a virtual machine.
        :param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str resource_guid: The resource GUID property of the network interface resource.
        :param Mapping[str, str] tags: Resource tags.
        :param Sequence['NetworkInterfaceTapConfigurationResponseArgs'] tap_configurations: A list of TapConfigurations of the network interface.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "hosted_workloads", hosted_workloads)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        pulumi.set(__self__, "type", type)
        pulumi.set(__self__, "virtual_machine", virtual_machine)
        # Optional fields are recorded only when the caller supplied a value.
        for key, value in (
                ("dns_settings", dns_settings),
                ("enable_accelerated_networking", enable_accelerated_networking),
                ("enable_ip_forwarding", enable_ip_forwarding),
                ("etag", etag),
                ("id", id),
                ("ip_configurations", ip_configurations),
                ("location", location),
                ("mac_address", mac_address),
                ("network_security_group", network_security_group),
                ("primary", primary),
                ("provisioning_state", provisioning_state),
                ("resource_guid", resource_guid),
                ("tags", tags),
                ("tap_configurations", tap_configurations)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="hostedWorkloads")
    def hosted_workloads(self) -> Sequence[str]:
        """
        A list of references to linked BareMetal resources.
        """
        value = pulumi.get(self, "hosted_workloads")
        return value

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        value = pulumi.get(self, "name")
        return value

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> 'outputs.PrivateEndpointResponse':
        """
        A reference to the private endpoint to which the network interface is linked.
        """
        value = pulumi.get(self, "private_endpoint")
        return value

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        value = pulumi.get(self, "type")
        return value

    @property
    @pulumi.getter(name="virtualMachine")
    def virtual_machine(self) -> 'outputs.SubResourceResponse':
        """
        The reference of a virtual machine.
        """
        value = pulumi.get(self, "virtual_machine")
        return value

    @property
    @pulumi.getter(name="dnsSettings")
    def dns_settings(self) -> Optional['outputs.NetworkInterfaceDnsSettingsResponse']:
        """
        The DNS settings in network interface.
        """
        value = pulumi.get(self, "dns_settings")
        return value

    @property
    @pulumi.getter(name="enableAcceleratedNetworking")
    def enable_accelerated_networking(self) -> Optional[bool]:
        """
        If the network interface is accelerated networking enabled.
        """
        value = pulumi.get(self, "enable_accelerated_networking")
        return value

    @property
    @pulumi.getter(name="enableIPForwarding")
    def enable_ip_forwarding(self) -> Optional[bool]:
        """
        Indicates whether IP forwarding is enabled on this network interface.
        """
        value = pulumi.get(self, "enable_ip_forwarding")
        return value

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        value = pulumi.get(self, "etag")
        return value

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        value = pulumi.get(self, "id")
        return value

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
        """
        A list of IPConfigurations of the network interface.
        """
        value = pulumi.get(self, "ip_configurations")
        return value

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        value = pulumi.get(self, "location")
        return value

    @property
    @pulumi.getter(name="macAddress")
    def mac_address(self) -> Optional[str]:
        """
        The MAC address of the network interface.
        """
        value = pulumi.get(self, "mac_address")
        return value

    @property
    @pulumi.getter(name="networkSecurityGroup")
    def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']:
        """
        The reference of the NetworkSecurityGroup resource.
        """
        value = pulumi.get(self, "network_security_group")
        return value

    @property
    @pulumi.getter
    def primary(self) -> Optional[bool]:
        """
        Gets whether this is a primary network interface on a virtual machine.
        """
        value = pulumi.get(self, "primary")
        return value

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        value = pulumi.get(self, "provisioning_state")
        return value

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """
        The resource GUID property of the network interface resource.
        """
        value = pulumi.get(self, "resource_guid")
        return value

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        value = pulumi.get(self, "tags")
        return value

    @property
    @pulumi.getter(name="tapConfigurations")
    def tap_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceTapConfigurationResponse']]:
        """
        A list of TapConfigurations of the network interface.
        """
        value = pulumi.get(self, "tap_configurations")
        return value

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class NetworkInterfaceTapConfigurationResponse(dict):
    """
    Tap configuration in a Network Interface.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 type: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 virtual_network_tap: Optional['outputs.VirtualNetworkTapResponse'] = None):
        """
        Tap configuration in a Network Interface.
        :param str provisioning_state: The provisioning state of the network interface tap configuration. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str type: Sub Resource type.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param 'VirtualNetworkTapResponseArgs' virtual_network_tap: The reference of the Virtual Network Tap resource.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when the caller supplied a value.
        for key, value in (
                ("etag", etag),
                ("id", id),
                ("name", name),
                ("virtual_network_tap", virtual_network_tap)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the network interface tap configuration. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        value = pulumi.get(self, "provisioning_state")
        return value

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Sub Resource type.
        """
        value = pulumi.get(self, "type")
        return value

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        value = pulumi.get(self, "etag")
        return value

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        value = pulumi.get(self, "id")
        return value

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        value = pulumi.get(self, "name")
        return value

    @property
    @pulumi.getter(name="virtualNetworkTap")
    def virtual_network_tap(self) -> Optional['outputs.VirtualNetworkTapResponse']:
        """
        The reference of the Virtual Network Tap resource.
        """
        value = pulumi.get(self, "virtual_network_tap")
        return value

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class NetworkSecurityGroupResponse(dict):
    """
    NetworkSecurityGroup resource.
    """
    def __init__(__self__, *,
                 name: str,
                 network_interfaces: Sequence['outputs.NetworkInterfaceResponse'],
                 subnets: Sequence['outputs.SubnetResponse'],
                 type: str,
                 default_security_rules: Optional[Sequence['outputs.SecurityRuleResponse']] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 location: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 resource_guid: Optional[str] = None,
                 security_rules: Optional[Sequence['outputs.SecurityRuleResponse']] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        NetworkSecurityGroup resource.
        :param str name: Resource name.
        :param Sequence['NetworkInterfaceResponseArgs'] network_interfaces: A collection of references to network interfaces.
        :param Sequence['SubnetResponseArgs'] subnets: A collection of references to subnets.
        :param str type: Resource type.
        :param Sequence['SecurityRuleResponseArgs'] default_security_rules: The default security rules of network security group.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str location: Resource location.
        :param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str resource_guid: The resource GUID property of the network security group resource.
        :param Sequence['SecurityRuleResponseArgs'] security_rules: A collection of security rules of the network security group.
        :param Mapping[str, str] tags: Resource tags.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "network_interfaces", network_interfaces)
        pulumi.set(__self__, "subnets", subnets)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when the caller supplied a value.
        for key, value in (
                ("default_security_rules", default_security_rules),
                ("etag", etag),
                ("id", id),
                ("location", location),
                ("provisioning_state", provisioning_state),
                ("resource_guid", resource_guid),
                ("security_rules", security_rules),
                ("tags", tags)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        value = pulumi.get(self, "name")
        return value

    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
        """
        A collection of references to network interfaces.
        """
        value = pulumi.get(self, "network_interfaces")
        return value

    @property
    @pulumi.getter
    def subnets(self) -> Sequence['outputs.SubnetResponse']:
        """
        A collection of references to subnets.
        """
        value = pulumi.get(self, "subnets")
        return value

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        value = pulumi.get(self, "type")
        return value

    @property
    @pulumi.getter(name="defaultSecurityRules")
    def default_security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]:
        """
        The default security rules of network security group.
        """
        value = pulumi.get(self, "default_security_rules")
        return value

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        value = pulumi.get(self, "etag")
        return value

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        value = pulumi.get(self, "id")
        return value

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        value = pulumi.get(self, "location")
        return value

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        value = pulumi.get(self, "provisioning_state")
        return value

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """
        The resource GUID property of the network security group resource.
        """
        value = pulumi.get(self, "resource_guid")
        return value

    @property
    @pulumi.getter(name="securityRules")
    def security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]:
        """
        A collection of security rules of the network security group.
        """
        value = pulumi.get(self, "security_rules")
        return value

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        value = pulumi.get(self, "tags")
        return value

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class OutboundRuleResponse(dict):
    """
    Outbound rule of the load balancer.
    """
    def __init__(__self__, *,
                 backend_address_pool: 'outputs.SubResourceResponse',
                 frontend_ip_configurations: Sequence['outputs.SubResourceResponse'],
                 protocol: str,
                 allocated_outbound_ports: Optional[int] = None,
                 enable_tcp_reset: Optional[bool] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 idle_timeout_in_minutes: Optional[int] = None,
                 name: Optional[str] = None,
                 provisioning_state: Optional[str] = None):
        """
        Outbound rule of the load balancer.
        :param 'SubResourceResponseArgs' backend_address_pool: A reference to a pool of DIPs. Outbound traffic is randomly load balanced across IPs in the backend IPs.
        :param Sequence['SubResourceResponseArgs'] frontend_ip_configurations: The Frontend IP addresses of the load balancer.
        :param str protocol: The protocol for the outbound rule in load balancer.
        :param int allocated_outbound_ports: The number of outbound ports to be used for NAT.
        :param bool enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param int idle_timeout_in_minutes: The timeout for the TCP idle connection.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "backend_address_pool", backend_address_pool)
        pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
        pulumi.set(__self__, "protocol", protocol)
        # Optional fields are recorded only when the caller supplied a value.
        for key, value in (
                ("allocated_outbound_ports", allocated_outbound_ports),
                ("enable_tcp_reset", enable_tcp_reset),
                ("etag", etag),
                ("id", id),
                ("idle_timeout_in_minutes", idle_timeout_in_minutes),
                ("name", name),
                ("provisioning_state", provisioning_state)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="backendAddressPool")
    def backend_address_pool(self) -> 'outputs.SubResourceResponse':
        """
        A reference to a pool of DIPs. Outbound traffic is randomly load balanced across IPs in the backend IPs.
        """
        value = pulumi.get(self, "backend_address_pool")
        return value

    @property
    @pulumi.getter(name="frontendIPConfigurations")
    def frontend_ip_configurations(self) -> Sequence['outputs.SubResourceResponse']:
        """
        The Frontend IP addresses of the load balancer.
        """
        value = pulumi.get(self, "frontend_ip_configurations")
        return value

    @property
    @pulumi.getter
    def protocol(self) -> str:
        """
        The protocol for the outbound rule in load balancer.
        """
        value = pulumi.get(self, "protocol")
        return value

    @property
    @pulumi.getter(name="allocatedOutboundPorts")
    def allocated_outbound_ports(self) -> Optional[int]:
        """
        The number of outbound ports to be used for NAT.
        """
        value = pulumi.get(self, "allocated_outbound_ports")
        return value

    @property
    @pulumi.getter(name="enableTcpReset")
    def enable_tcp_reset(self) -> Optional[bool]:
        """
        Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
        """
        value = pulumi.get(self, "enable_tcp_reset")
        return value

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        value = pulumi.get(self, "etag")
        return value

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        value = pulumi.get(self, "id")
        return value

    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[int]:
        """
        The timeout for the TCP idle connection.
        """
        value = pulumi.get(self, "idle_timeout_in_minutes")
        return value

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        value = pulumi.get(self, "name")
        return value

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        value = pulumi.get(self, "provisioning_state")
        return value

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class P2SVpnServerConfigRadiusClientRootCertificateResponse(dict):
    """
    Radius client root certificate of P2SVpnServerConfiguration.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 thumbprint: Optional[str] = None):
        """
        Radius client root certificate of P2SVpnServerConfiguration.
        :param str provisioning_state: The provisioning state of the Radius client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str thumbprint: The Radius client root certificate thumbprint.
        """
        # Required field is always recorded on the output object.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are recorded only when the caller supplied a value.
        for key, value in (
                ("etag", etag),
                ("id", id),
                ("name", name),
                ("thumbprint", thumbprint)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the Radius client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        value = pulumi.get(self, "provisioning_state")
        return value

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        value = pulumi.get(self, "etag")
        return value

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        value = pulumi.get(self, "id")
        return value

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        value = pulumi.get(self, "name")
        return value

    @property
    @pulumi.getter
    def thumbprint(self) -> Optional[str]:
        """
        The Radius client root certificate thumbprint.
        """
        value = pulumi.get(self, "thumbprint")
        return value

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class P2SVpnServerConfigRadiusServerRootCertificateResponse(dict):
    """
    Radius Server root certificate of P2SVpnServerConfiguration.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 public_cert_data: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None):
        """
        Radius Server root certificate of P2SVpnServerConfiguration.
        :param str provisioning_state: The provisioning state of the P2SVpnServerConfiguration Radius Server root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str public_cert_data: The certificate public data.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "public_cert_data", public_cert_data)
        # Optional fields are recorded only when the caller supplied a value.
        for key, value in (
                ("etag", etag),
                ("id", id),
                ("name", name)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the P2SVpnServerConfiguration Radius Server root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        value = pulumi.get(self, "provisioning_state")
        return value

    @property
    @pulumi.getter(name="publicCertData")
    def public_cert_data(self) -> str:
        """
        The certificate public data.
        """
        value = pulumi.get(self, "public_cert_data")
        return value

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        value = pulumi.get(self, "etag")
        return value

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        value = pulumi.get(self, "id")
        return value

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        value = pulumi.get(self, "name")
        return value

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class P2SVpnServerConfigVpnClientRevokedCertificateResponse(dict):
    """
    VPN client revoked certificate of P2SVpnServerConfiguration.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 thumbprint: Optional[str] = None):
        """
        VPN client revoked certificate of P2SVpnServerConfiguration.
        :param str provisioning_state: The provisioning state of the VPN client revoked certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str thumbprint: The revoked VPN client certificate thumbprint.
        """
        # Required field is always recorded on the output object.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are recorded only when the caller supplied a value.
        for key, value in (
                ("etag", etag),
                ("id", id),
                ("name", name),
                ("thumbprint", thumbprint)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the VPN client revoked certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        value = pulumi.get(self, "provisioning_state")
        return value

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        value = pulumi.get(self, "etag")
        return value

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        value = pulumi.get(self, "id")
        return value

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        value = pulumi.get(self, "name")
        return value

    @property
    @pulumi.getter
    def thumbprint(self) -> Optional[str]:
        """
        The revoked VPN client certificate thumbprint.
        """
        value = pulumi.get(self, "thumbprint")
        return value

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class P2SVpnServerConfigVpnClientRootCertificateResponse(dict):
    """
    VPN client root certificate of P2SVpnServerConfiguration.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 public_cert_data: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None):
        """
        VPN client root certificate of P2SVpnServerConfiguration.
        :param str provisioning_state: The provisioning state of the P2SVpnServerConfiguration VPN client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str public_cert_data: The certificate public data.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "public_cert_data", public_cert_data)
        # Optional fields are recorded only when the caller supplied a value.
        for key, value in (
                ("etag", etag),
                ("id", id),
                ("name", name)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the P2SVpnServerConfiguration VPN client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        value = pulumi.get(self, "provisioning_state")
        return value

    @property
    @pulumi.getter(name="publicCertData")
    def public_cert_data(self) -> str:
        """
        The certificate public data.
        """
        value = pulumi.get(self, "public_cert_data")
        return value

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        value = pulumi.get(self, "etag")
        return value

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        value = pulumi.get(self, "id")
        return value

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        value = pulumi.get(self, "name")
        return value

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class P2SVpnServerConfigurationResponse(dict):
    """
    P2SVpnServerConfiguration Resource.

    Output (response) shape for a point-to-site VPN server configuration.
    Values are stored in the underlying dict via ``pulumi.set`` and exposed
    through snake_case properties below.
    """
    def __init__(__self__, *,
                 etag: str,
                 p2_s_vpn_gateways: Sequence['outputs.SubResourceResponse'],
                 provisioning_state: str,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 p2_s_vpn_server_config_radius_client_root_certificates: Optional[Sequence['outputs.P2SVpnServerConfigRadiusClientRootCertificateResponse']] = None,
                 p2_s_vpn_server_config_radius_server_root_certificates: Optional[Sequence['outputs.P2SVpnServerConfigRadiusServerRootCertificateResponse']] = None,
                 p2_s_vpn_server_config_vpn_client_revoked_certificates: Optional[Sequence['outputs.P2SVpnServerConfigVpnClientRevokedCertificateResponse']] = None,
                 p2_s_vpn_server_config_vpn_client_root_certificates: Optional[Sequence['outputs.P2SVpnServerConfigVpnClientRootCertificateResponse']] = None,
                 radius_server_address: Optional[str] = None,
                 radius_server_secret: Optional[str] = None,
                 vpn_client_ipsec_policies: Optional[Sequence['outputs.IpsecPolicyResponse']] = None,
                 vpn_protocols: Optional[Sequence[str]] = None):
        """
        P2SVpnServerConfiguration Resource.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param Sequence['SubResourceResponseArgs'] p2_s_vpn_gateways: List of references to P2SVpnGateways.
        :param str provisioning_state: The provisioning state of the P2SVpnServerConfiguration resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str id: Resource ID.
        :param str name: The name of the P2SVpnServerConfiguration that is unique within a VirtualWan in a resource group. This name can be used to access the resource along with Parent VirtualWan resource name.
        :param Sequence['P2SVpnServerConfigRadiusClientRootCertificateResponseArgs'] p2_s_vpn_server_config_radius_client_root_certificates: Radius client root certificate of P2SVpnServerConfiguration.
        :param Sequence['P2SVpnServerConfigRadiusServerRootCertificateResponseArgs'] p2_s_vpn_server_config_radius_server_root_certificates: Radius Server root certificate of P2SVpnServerConfiguration.
        :param Sequence['P2SVpnServerConfigVpnClientRevokedCertificateResponseArgs'] p2_s_vpn_server_config_vpn_client_revoked_certificates: VPN client revoked certificate of P2SVpnServerConfiguration.
        :param Sequence['P2SVpnServerConfigVpnClientRootCertificateResponseArgs'] p2_s_vpn_server_config_vpn_client_root_certificates: VPN client root certificate of P2SVpnServerConfiguration.
        :param str radius_server_address: The radius server address property of the P2SVpnServerConfiguration resource for point to site client connection.
        :param str radius_server_secret: The radius secret property of the P2SVpnServerConfiguration resource for point to site client connection.
        :param Sequence['IpsecPolicyResponseArgs'] vpn_client_ipsec_policies: VpnClientIpsecPolicies for P2SVpnServerConfiguration.
        :param Sequence[str] vpn_protocols: VPN protocols for the P2SVpnServerConfiguration.
        """
        # Required properties: always stored.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "p2_s_vpn_gateways", p2_s_vpn_gateways)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional properties: stored only when explicitly supplied, so the
        # backing dict contains no keys for omitted values.
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if p2_s_vpn_server_config_radius_client_root_certificates is not None:
            pulumi.set(__self__, "p2_s_vpn_server_config_radius_client_root_certificates", p2_s_vpn_server_config_radius_client_root_certificates)
        if p2_s_vpn_server_config_radius_server_root_certificates is not None:
            pulumi.set(__self__, "p2_s_vpn_server_config_radius_server_root_certificates", p2_s_vpn_server_config_radius_server_root_certificates)
        if p2_s_vpn_server_config_vpn_client_revoked_certificates is not None:
            pulumi.set(__self__, "p2_s_vpn_server_config_vpn_client_revoked_certificates", p2_s_vpn_server_config_vpn_client_revoked_certificates)
        if p2_s_vpn_server_config_vpn_client_root_certificates is not None:
            pulumi.set(__self__, "p2_s_vpn_server_config_vpn_client_root_certificates", p2_s_vpn_server_config_vpn_client_root_certificates)
        if radius_server_address is not None:
            pulumi.set(__self__, "radius_server_address", radius_server_address)
        if radius_server_secret is not None:
            pulumi.set(__self__, "radius_server_secret", radius_server_secret)
        if vpn_client_ipsec_policies is not None:
            pulumi.set(__self__, "vpn_client_ipsec_policies", vpn_client_ipsec_policies)
        if vpn_protocols is not None:
            pulumi.set(__self__, "vpn_protocols", vpn_protocols)
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="p2SVpnGateways")
    def p2_s_vpn_gateways(self) -> Sequence['outputs.SubResourceResponse']:
        """
        List of references to P2SVpnGateways.
        """
        return pulumi.get(self, "p2_s_vpn_gateways")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the P2SVpnServerConfiguration resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the P2SVpnServerConfiguration that is unique within a VirtualWan in a resource group. This name can be used to access the resource along with Parent VirtualWan resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="p2SVpnServerConfigRadiusClientRootCertificates")
    def p2_s_vpn_server_config_radius_client_root_certificates(self) -> Optional[Sequence['outputs.P2SVpnServerConfigRadiusClientRootCertificateResponse']]:
        """
        Radius client root certificate of P2SVpnServerConfiguration.
        """
        return pulumi.get(self, "p2_s_vpn_server_config_radius_client_root_certificates")
    @property
    @pulumi.getter(name="p2SVpnServerConfigRadiusServerRootCertificates")
    def p2_s_vpn_server_config_radius_server_root_certificates(self) -> Optional[Sequence['outputs.P2SVpnServerConfigRadiusServerRootCertificateResponse']]:
        """
        Radius Server root certificate of P2SVpnServerConfiguration.
        """
        return pulumi.get(self, "p2_s_vpn_server_config_radius_server_root_certificates")
    @property
    @pulumi.getter(name="p2SVpnServerConfigVpnClientRevokedCertificates")
    def p2_s_vpn_server_config_vpn_client_revoked_certificates(self) -> Optional[Sequence['outputs.P2SVpnServerConfigVpnClientRevokedCertificateResponse']]:
        """
        VPN client revoked certificate of P2SVpnServerConfiguration.
        """
        return pulumi.get(self, "p2_s_vpn_server_config_vpn_client_revoked_certificates")
    @property
    @pulumi.getter(name="p2SVpnServerConfigVpnClientRootCertificates")
    def p2_s_vpn_server_config_vpn_client_root_certificates(self) -> Optional[Sequence['outputs.P2SVpnServerConfigVpnClientRootCertificateResponse']]:
        """
        VPN client root certificate of P2SVpnServerConfiguration.
        """
        return pulumi.get(self, "p2_s_vpn_server_config_vpn_client_root_certificates")
    @property
    @pulumi.getter(name="radiusServerAddress")
    def radius_server_address(self) -> Optional[str]:
        """
        The radius server address property of the P2SVpnServerConfiguration resource for point to site client connection.
        """
        return pulumi.get(self, "radius_server_address")
    @property
    @pulumi.getter(name="radiusServerSecret")
    def radius_server_secret(self) -> Optional[str]:
        """
        The radius secret property of the P2SVpnServerConfiguration resource for point to site client connection.
        """
        return pulumi.get(self, "radius_server_secret")
    @property
    @pulumi.getter(name="vpnClientIpsecPolicies")
    def vpn_client_ipsec_policies(self) -> Optional[Sequence['outputs.IpsecPolicyResponse']]:
        """
        VpnClientIpsecPolicies for P2SVpnServerConfiguration.
        """
        return pulumi.get(self, "vpn_client_ipsec_policies")
    @property
    @pulumi.getter(name="vpnProtocols")
    def vpn_protocols(self) -> Optional[Sequence[str]]:
        """
        VPN protocols for the P2SVpnServerConfiguration.
        """
        return pulumi.get(self, "vpn_protocols")
    def _translate_property(self, prop):
        # Map a camelCase wire-format key to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PacketCaptureFilterResponse(dict):
    """
    Filter that is applied to packet capture request. Multiple filters can be applied.
    """
    def __init__(__self__, *,
                 local_ip_address: Optional[str] = None,
                 local_port: Optional[str] = None,
                 protocol: Optional[str] = None,
                 remote_ip_address: Optional[str] = None,
                 remote_port: Optional[str] = None):
        """
        Filter that is applied to packet capture request. Multiple filters can be applied.
        :param str local_ip_address: Local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5"? for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
        :param str local_port: Local port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
        :param str protocol: Protocol to be filtered on.
        :param str remote_ip_address: Remote IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
        :param str remote_port: Remote port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
        """
        # The protocol defaults to 'Any' when the caller does not specify one,
        # so it is always stored; the other fields are stored only when given.
        if protocol is None:
            protocol = 'Any'
        fields = (
            ("local_ip_address", local_ip_address),
            ("local_port", local_port),
            ("protocol", protocol),
            ("remote_ip_address", remote_ip_address),
            ("remote_port", remote_port),
        )
        # Insertion order matches the declaration order above so the backing
        # dict keeps the same key ordering as before.
        for key, value in fields:
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="localIPAddress")
    def local_ip_address(self) -> Optional[str]:
        """
        Local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5"? for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
        """
        return pulumi.get(self, "local_ip_address")
    @property
    @pulumi.getter(name="localPort")
    def local_port(self) -> Optional[str]:
        """
        Local port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
        """
        return pulumi.get(self, "local_port")
    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """
        Protocol to be filtered on.
        """
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="remoteIPAddress")
    def remote_ip_address(self) -> Optional[str]:
        """
        Remote IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
        """
        return pulumi.get(self, "remote_ip_address")
    @property
    @pulumi.getter(name="remotePort")
    def remote_port(self) -> Optional[str]:
        """
        Remote port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
        """
        return pulumi.get(self, "remote_port")
    def _translate_property(self, prop):
        # Map a camelCase wire-format key to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PacketCaptureStorageLocationResponse(dict):
    """
    Describes the storage location for a packet capture session.
    """
    def __init__(__self__, *,
                 file_path: Optional[str] = None,
                 storage_id: Optional[str] = None,
                 storage_path: Optional[str] = None):
        """
        Describes the storage location for a packet capture session.
        :param str file_path: A valid local path on the targeting VM. Must include the name of the capture file (*.cap). For linux virtual machine it must start with /var/captures. Required if no storage ID is provided, otherwise optional.
        :param str storage_id: The ID of the storage account to save the packet capture session. Required if no local file path is provided.
        :param str storage_path: The URI of the storage path to save the packet capture. Must be a well-formed URI describing the location to save the packet capture.
        """
        # All three fields are optional; store only those explicitly supplied,
        # preserving the declaration order in the backing dict.
        for key, value in (("file_path", file_path),
                           ("storage_id", storage_id),
                           ("storage_path", storage_path)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="filePath")
    def file_path(self) -> Optional[str]:
        """
        A valid local path on the targeting VM. Must include the name of the capture file (*.cap). For linux virtual machine it must start with /var/captures. Required if no storage ID is provided, otherwise optional.
        """
        return pulumi.get(self, "file_path")
    @property
    @pulumi.getter(name="storageId")
    def storage_id(self) -> Optional[str]:
        """
        The ID of the storage account to save the packet capture session. Required if no local file path is provided.
        """
        return pulumi.get(self, "storage_id")
    @property
    @pulumi.getter(name="storagePath")
    def storage_path(self) -> Optional[str]:
        """
        The URI of the storage path to save the packet capture. Must be a well-formed URI describing the location to save the packet capture.
        """
        return pulumi.get(self, "storage_path")
    def _translate_property(self, prop):
        # Map a camelCase wire-format key to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PeerExpressRouteCircuitConnectionResponse(dict):
    """
    Peer Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.

    Output (response) shape; values live in the underlying dict via
    ``pulumi.set`` and are exposed through the snake_case properties below.
    """
    def __init__(__self__, *,
                 circuit_connection_status: str,
                 etag: str,
                 provisioning_state: str,
                 type: str,
                 address_prefix: Optional[str] = None,
                 auth_resource_guid: Optional[str] = None,
                 connection_name: Optional[str] = None,
                 express_route_circuit_peering: Optional['outputs.SubResourceResponse'] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 peer_express_route_circuit_peering: Optional['outputs.SubResourceResponse'] = None):
        """
        Peer Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.
        :param str circuit_connection_status: Express Route Circuit connection state.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: Provisioning state of the peer express route circuit connection resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'.
        :param str type: Type of the resource.
        :param str address_prefix: /29 IP address space to carve out Customer addresses for tunnels.
        :param str auth_resource_guid: The resource guid of the authorization used for the express route circuit connection.
        :param str connection_name: The name of the express route circuit connection resource.
        :param 'SubResourceResponseArgs' express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the circuit.
        :param str id: Resource ID.
        :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param 'SubResourceResponseArgs' peer_express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the peered circuit.
        """
        # Required properties: always stored.
        pulumi.set(__self__, "circuit_connection_status", circuit_connection_status)
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional properties: stored only when explicitly supplied.
        if address_prefix is not None:
            pulumi.set(__self__, "address_prefix", address_prefix)
        if auth_resource_guid is not None:
            pulumi.set(__self__, "auth_resource_guid", auth_resource_guid)
        if connection_name is not None:
            pulumi.set(__self__, "connection_name", connection_name)
        if express_route_circuit_peering is not None:
            pulumi.set(__self__, "express_route_circuit_peering", express_route_circuit_peering)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if peer_express_route_circuit_peering is not None:
            pulumi.set(__self__, "peer_express_route_circuit_peering", peer_express_route_circuit_peering)
    @property
    @pulumi.getter(name="circuitConnectionStatus")
    def circuit_connection_status(self) -> str:
        """
        Express Route Circuit connection state.
        """
        return pulumi.get(self, "circuit_connection_status")
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state of the peer express route circuit connection resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of the resource.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> Optional[str]:
        """
        /29 IP address space to carve out Customer addresses for tunnels.
        """
        return pulumi.get(self, "address_prefix")
    @property
    @pulumi.getter(name="authResourceGuid")
    def auth_resource_guid(self) -> Optional[str]:
        """
        The resource guid of the authorization used for the express route circuit connection.
        """
        return pulumi.get(self, "auth_resource_guid")
    @property
    @pulumi.getter(name="connectionName")
    def connection_name(self) -> Optional[str]:
        """
        The name of the express route circuit connection resource.
        """
        return pulumi.get(self, "connection_name")
    @property
    @pulumi.getter(name="expressRouteCircuitPeering")
    def express_route_circuit_peering(self) -> Optional['outputs.SubResourceResponse']:
        """
        Reference to Express Route Circuit Private Peering Resource of the circuit.
        """
        return pulumi.get(self, "express_route_circuit_peering")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="peerExpressRouteCircuitPeering")
    def peer_express_route_circuit_peering(self) -> Optional['outputs.SubResourceResponse']:
        """
        Reference to Express Route Circuit Private Peering Resource of the peered circuit.
        """
        return pulumi.get(self, "peer_express_route_circuit_peering")
    def _translate_property(self, prop):
        # Map a camelCase wire-format key to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PolicySettingsResponse(dict):
    """
    Defines contents of a web application firewall global configuration.
    """
    def __init__(__self__, *,
                 enabled_state: Optional[str] = None,
                 mode: Optional[str] = None):
        """
        Defines contents of a web application firewall global configuration.
        :param str enabled_state: Describes if the policy is in enabled state or disabled state.
        :param str mode: Describes if it is in detection mode or prevention mode at policy level.
        """
        # Both settings are optional; persist only those explicitly supplied.
        for key, value in (("enabled_state", enabled_state), ("mode", mode)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="enabledState")
    def enabled_state(self) -> Optional[str]:
        """
        Describes if the policy is in enabled state or disabled state.
        """
        return pulumi.get(self, "enabled_state")
    @property
    @pulumi.getter
    def mode(self) -> Optional[str]:
        """
        Describes if it is in detection mode or prevention mode at policy level.
        """
        return pulumi.get(self, "mode")
    def _translate_property(self, prop):
        # Map a camelCase wire-format key to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateEndpointConnectionResponse(dict):
    """
    PrivateEndpointConnection resource.

    Output (response) shape; values live in the underlying dict via
    ``pulumi.set`` and are exposed through the snake_case properties below.
    """
    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 type: str,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 private_endpoint: Optional['outputs.PrivateEndpointResponse'] = None,
                 private_link_service_connection_state: Optional['outputs.PrivateLinkServiceConnectionStateResponse'] = None):
        """
        PrivateEndpointConnection resource.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: The provisioning state of the private endpoint connection.
        :param str type: The resource type.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param 'PrivateEndpointResponseArgs' private_endpoint: The resource of private end point.
        :param 'PrivateLinkServiceConnectionStateResponseArgs' private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
        """
        # Required properties: always stored.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional properties: stored only when explicitly supplied.
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if private_endpoint is not None:
            pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state is not None:
            pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the private endpoint connection.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """
        The resource of private end point.
        """
        return pulumi.get(self, "private_endpoint")
    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']:
        """
        A collection of information about the state of the connection between service consumer and provider.
        """
        return pulumi.get(self, "private_link_service_connection_state")
    def _translate_property(self, prop):
        # Map a camelCase wire-format key to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateEndpointResponse(dict):
    """
    Private endpoint resource.

    Output (response) shape; values live in the underlying dict via
    ``pulumi.set`` and are exposed through the snake_case properties below.
    """
    def __init__(__self__, *,
                 name: str,
                 network_interfaces: Sequence['outputs.NetworkInterfaceResponse'],
                 provisioning_state: str,
                 type: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 location: Optional[str] = None,
                 manual_private_link_service_connections: Optional[Sequence['outputs.PrivateLinkServiceConnectionResponse']] = None,
                 private_link_service_connections: Optional[Sequence['outputs.PrivateLinkServiceConnectionResponse']] = None,
                 subnet: Optional['outputs.SubnetResponse'] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        Private endpoint resource.
        :param str name: Resource name.
        :param Sequence['NetworkInterfaceResponseArgs'] network_interfaces: Gets an array of references to the network interfaces created for this private endpoint.
        :param str provisioning_state: The provisioning state of the private endpoint.
        :param str type: Resource type.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str location: Resource location.
        :param Sequence['PrivateLinkServiceConnectionResponseArgs'] manual_private_link_service_connections: A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
        :param Sequence['PrivateLinkServiceConnectionResponseArgs'] private_link_service_connections: A grouping of information about the connection to the remote resource.
        :param 'SubnetResponseArgs' subnet: The ID of the subnet from which the private IP will be allocated.
        :param Mapping[str, str] tags: Resource tags.
        """
        # Required properties: always stored.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "network_interfaces", network_interfaces)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional properties: stored only when explicitly supplied.
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if manual_private_link_service_connections is not None:
            pulumi.set(__self__, "manual_private_link_service_connections", manual_private_link_service_connections)
        if private_link_service_connections is not None:
            pulumi.set(__self__, "private_link_service_connections", private_link_service_connections)
        if subnet is not None:
            pulumi.set(__self__, "subnet", subnet)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
        """
        Gets an array of references to the network interfaces created for this private endpoint.
        """
        return pulumi.get(self, "network_interfaces")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the private endpoint.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="manualPrivateLinkServiceConnections")
    def manual_private_link_service_connections(self) -> Optional[Sequence['outputs.PrivateLinkServiceConnectionResponse']]:
        """
        A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
        """
        return pulumi.get(self, "manual_private_link_service_connections")
    @property
    @pulumi.getter(name="privateLinkServiceConnections")
    def private_link_service_connections(self) -> Optional[Sequence['outputs.PrivateLinkServiceConnectionResponse']]:
        """
        A grouping of information about the connection to the remote resource.
        """
        return pulumi.get(self, "private_link_service_connections")
    @property
    @pulumi.getter
    def subnet(self) -> Optional['outputs.SubnetResponse']:
        """
        The ID of the subnet from which the private IP will be allocated.
        """
        return pulumi.get(self, "subnet")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    def _translate_property(self, prop):
        # Map a camelCase wire-format key to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateLinkServiceConnectionResponse(dict):
    """
    PrivateLinkServiceConnection resource.

    Output (response) shape; values live in the underlying dict via
    ``pulumi.set`` and are exposed through the snake_case properties below.
    """
    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 type: str,
                 group_ids: Optional[Sequence[str]] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 private_link_service_connection_state: Optional['outputs.PrivateLinkServiceConnectionStateResponse'] = None,
                 private_link_service_id: Optional[str] = None,
                 request_message: Optional[str] = None):
        """
        PrivateLinkServiceConnection resource.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: The provisioning state of the private link service connection.
        :param str type: The resource type.
        :param Sequence[str] group_ids: The ID(s) of the group(s) obtained from the remote resource that this private endpoint should connect to.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param 'PrivateLinkServiceConnectionStateResponseArgs' private_link_service_connection_state: A collection of read-only information about the state of the connection to the remote resource.
        :param str private_link_service_id: The resource id of private link service.
        :param str request_message: A message passed to the owner of the remote resource with this connection request. Restricted to 140 chars.
        """
        # Required properties: always stored.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional properties: stored only when explicitly supplied.
        if group_ids is not None:
            pulumi.set(__self__, "group_ids", group_ids)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if private_link_service_connection_state is not None:
            pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if private_link_service_id is not None:
            pulumi.set(__self__, "private_link_service_id", private_link_service_id)
        if request_message is not None:
            pulumi.set(__self__, "request_message", request_message)
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the private link service connection.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="groupIds")
    def group_ids(self) -> Optional[Sequence[str]]:
        """
        The ID(s) of the group(s) obtained from the remote resource that this private endpoint should connect to.
        """
        return pulumi.get(self, "group_ids")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']:
        """
        A collection of read-only information about the state of the connection to the remote resource.
        """
        return pulumi.get(self, "private_link_service_connection_state")
    @property
    @pulumi.getter(name="privateLinkServiceId")
    def private_link_service_id(self) -> Optional[str]:
        """
        The resource id of private link service.
        """
        return pulumi.get(self, "private_link_service_id")
    @property
    @pulumi.getter(name="requestMessage")
    def request_message(self) -> Optional[str]:
        """
        A message passed to the owner of the remote resource with this connection request. Restricted to 140 chars.
        """
        return pulumi.get(self, "request_message")
    def _translate_property(self, prop):
        # Map a camelCase wire-format key to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateLinkServiceConnectionStateResponse(dict):
    """
    A collection of information about the state of the connection between service consumer and provider.
    """
    def __init__(__self__, *,
                 actions_required: Optional[str] = None,
                 description: Optional[str] = None,
                 status: Optional[str] = None):
        """
        A collection of information about the state of the connection between service consumer and provider.
        :param str actions_required: A message indicating if changes on the service provider require any updates on the consumer.
        :param str description: The reason for approval/rejection of the connection.
        :param str status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
        """
        # Every field is optional; store only those explicitly supplied,
        # preserving the declaration order in the backing dict.
        for key, value in (("actions_required", actions_required),
                           ("description", description),
                           ("status", status)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="actionsRequired")
    def actions_required(self) -> Optional[str]:
        """
        A message indicating if changes on the service provider require any updates on the consumer.
        """
        return pulumi.get(self, "actions_required")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The reason for approval/rejection of the connection.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
        """
        return pulumi.get(self, "status")
    def _translate_property(self, prop):
        # Map a camelCase wire-format key to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateLinkServiceIpConfigurationResponse(dict):
    """
    An IP configuration of a private link service.
    """
    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 type: str,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 primary: Optional[bool] = None,
                 private_ip_address: Optional[str] = None,
                 private_ip_address_version: Optional[str] = None,
                 private_ip_allocation_method: Optional[str] = None,
                 subnet: Optional['outputs.SubnetResponse'] = None):
        """
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: The provisioning state of the private link service ip configuration.
        :param str type: The resource type.
        :param str id: Resource ID.
        :param str name: The name of private link service ip configuration.
        :param bool primary: Whether the ip configuration is primary or not.
        :param str private_ip_address: The private IP address of the IP configuration.
        :param str private_ip_address_version: Available from Api-Version 2016-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4.
        :param str private_ip_allocation_method: The private IP address allocation method.
        :param 'SubnetResponseArgs' subnet: The reference of the subnet resource.
        """
        # Required properties are always stored.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional properties are stored only when supplied.
        for key, value in (
                ("id", id),
                ("name", name),
                ("primary", primary),
                ("private_ip_address", private_ip_address),
                ("private_ip_address_version", private_ip_address_version),
                ("private_ip_allocation_method", private_ip_allocation_method),
                ("subnet", subnet)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """Opaque read-only string that changes on every update to the resource."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Provisioning state of this ip configuration."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """The resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the private link service ip configuration."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def primary(self) -> Optional[bool]:
        """True when this ip configuration is the primary one."""
        return pulumi.get(self, "primary")

    @property
    @pulumi.getter(name="privateIPAddress")
    def private_ip_address(self) -> Optional[str]:
        """Private IP address of the IP configuration."""
        return pulumi.get(self, "private_ip_address")

    @property
    @pulumi.getter(name="privateIPAddressVersion")
    def private_ip_address_version(self) -> Optional[str]:
        """
        Whether this ipconfiguration is IPv4 or IPv6 (available from
        Api-Version 2016-03-30 onwards). Default is taken as IPv4.
        """
        return pulumi.get(self, "private_ip_address_version")

    @property
    @pulumi.getter(name="privateIPAllocationMethod")
    def private_ip_allocation_method(self) -> Optional[str]:
        """Allocation method used for the private IP address."""
        return pulumi.get(self, "private_ip_allocation_method")

    @property
    @pulumi.getter
    def subnet(self) -> Optional['outputs.SubnetResponse']:
        """Reference to the subnet resource."""
        return pulumi.get(self, "subnet")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PrivateLinkServicePropertiesResponseAutoApproval(dict):
    """
    Auto-approval list of a private link service.
    """
    def __init__(__self__, *,
                 subscriptions: Optional[Sequence[str]] = None):
        """
        :param Sequence[str] subscriptions: The list of subscriptions.
        """
        # Record the subscription list only when one was supplied.
        if subscriptions is not None:
            pulumi.set(__self__, "subscriptions", subscriptions)

    @property
    @pulumi.getter
    def subscriptions(self) -> Optional[Sequence[str]]:
        """The list of subscriptions."""
        return pulumi.get(self, "subscriptions")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PrivateLinkServicePropertiesResponseVisibility(dict):
    """
    Visibility list of a private link service.
    """
    def __init__(__self__, *,
                 subscriptions: Optional[Sequence[str]] = None):
        """
        :param Sequence[str] subscriptions: The list of subscriptions.
        """
        # Record the subscription list only when one was supplied.
        if subscriptions is not None:
            pulumi.set(__self__, "subscriptions", subscriptions)

    @property
    @pulumi.getter
    def subscriptions(self) -> Optional[Sequence[str]]:
        """The list of subscriptions."""
        return pulumi.get(self, "subscriptions")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ProbeResponse(dict):
    """
    A health probe attached to a load balancer.
    """
    def __init__(__self__, *,
                 load_balancing_rules: Sequence['outputs.SubResourceResponse'],
                 port: int,
                 protocol: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 interval_in_seconds: Optional[int] = None,
                 name: Optional[str] = None,
                 number_of_probes: Optional[int] = None,
                 provisioning_state: Optional[str] = None,
                 request_path: Optional[str] = None):
        """
        :param Sequence['SubResourceResponseArgs'] load_balancing_rules: The load balancer rules that use this probe.
        :param int port: The port for communicating the probe. Possible values range from 1 to 65535, inclusive.
        :param str protocol: The protocol of the end point. If 'Tcp' is specified, a received ACK is required for the probe to be successful. If 'Http' or 'Https' is specified, a 200 OK response from the specifies URI is required for the probe to be successful.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param int interval_in_seconds: The interval, in seconds, for how frequently to probe the endpoint for health status. Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5.
        :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param int number_of_probes: The number of probes where if no response, will result in stopping further traffic from being delivered to the endpoint. This values allows endpoints to be taken out of rotation faster or slower than the typical times used in Azure.
        :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str request_path: The URI used for requesting health status from the VM. Path is required if a protocol is set to http. Otherwise, it is not allowed. There is no default value.
        """
        # Required properties are always stored.
        pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
        pulumi.set(__self__, "port", port)
        pulumi.set(__self__, "protocol", protocol)
        # Optional properties are stored only when supplied.
        for key, value in (
                ("etag", etag),
                ("id", id),
                ("interval_in_seconds", interval_in_seconds),
                ("name", name),
                ("number_of_probes", number_of_probes),
                ("provisioning_state", provisioning_state),
                ("request_path", request_path)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="loadBalancingRules")
    def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']:
        """Load balancer rules that use this probe."""
        return pulumi.get(self, "load_balancing_rules")

    @property
    @pulumi.getter
    def port(self) -> int:
        """Port the probe communicates on (1-65535 inclusive)."""
        return pulumi.get(self, "port")

    @property
    @pulumi.getter
    def protocol(self) -> str:
        """
        Endpoint protocol. 'Tcp' requires a received ACK for success;
        'Http'/'Https' require a 200 OK response from the specified URI.
        """
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Opaque read-only string that changes on every update to the resource."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="intervalInSeconds")
    def interval_in_seconds(self) -> Optional[int]:
        """
        Probe interval in seconds; typically just under half the allocated
        timeout so two full probes fit before taking the instance out of
        rotation. Default 15, minimum 5.
        """
        return pulumi.get(self, "interval_in_seconds")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name unique within the resource group; usable to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="numberOfProbes")
    def number_of_probes(self) -> Optional[int]:
        """
        Number of unanswered probes after which traffic to the endpoint
        stops; tunes how quickly endpoints leave rotation.
        """
        return pulumi.get(self, "number_of_probes")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state; possible values: 'Updating', 'Deleting', 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="requestPath")
    def request_path(self) -> Optional[str]:
        """
        URI used for requesting health status from the VM. Required when the
        protocol is http, disallowed otherwise; no default value.
        """
        return pulumi.get(self, "request_path")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ProtocolCustomSettingsFormatResponse(dict):
    """
    Custom DDoS protection policy settings for one protocol.
    """
    def __init__(__self__, *,
                 protocol: Optional[str] = None,
                 source_rate_override: Optional[str] = None,
                 trigger_rate_override: Optional[str] = None,
                 trigger_sensitivity_override: Optional[str] = None):
        """
        :param str protocol: The protocol for which the DDoS protection policy is being customized.
        :param str source_rate_override: The customized DDoS protection source rate.
        :param str trigger_rate_override: The customized DDoS protection trigger rate.
        :param str trigger_sensitivity_override: The customized DDoS protection trigger rate sensitivity degrees. High: Trigger rate set with most sensitivity w.r.t. normal traffic. Default: Trigger rate set with moderate sensitivity w.r.t. normal traffic. Low: Trigger rate set with less sensitivity w.r.t. normal traffic. Relaxed: Trigger rate set with least sensitivity w.r.t. normal traffic.
        """
        # Every field is optional; record only what was supplied.
        for key, value in (
                ("protocol", protocol),
                ("source_rate_override", source_rate_override),
                ("trigger_rate_override", trigger_rate_override),
                ("trigger_sensitivity_override", trigger_sensitivity_override)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """Protocol the DDoS protection policy is customized for."""
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter(name="sourceRateOverride")
    def source_rate_override(self) -> Optional[str]:
        """Customized DDoS protection source rate."""
        return pulumi.get(self, "source_rate_override")

    @property
    @pulumi.getter(name="triggerRateOverride")
    def trigger_rate_override(self) -> Optional[str]:
        """Customized DDoS protection trigger rate."""
        return pulumi.get(self, "trigger_rate_override")

    @property
    @pulumi.getter(name="triggerSensitivityOverride")
    def trigger_sensitivity_override(self) -> Optional[str]:
        """
        Trigger rate sensitivity relative to normal traffic: High (most
        sensitive), Default (moderate), Low (less), Relaxed (least).
        """
        return pulumi.get(self, "trigger_sensitivity_override")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PublicIPAddressDnsSettingsResponse(dict):
    """
    FQDN of the DNS record associated with a public IP address.
    """
    def __init__(__self__, *,
                 domain_name_label: Optional[str] = None,
                 fqdn: Optional[str] = None,
                 reverse_fqdn: Optional[str] = None):
        """
        :param str domain_name_label: Gets or sets the Domain name label.The concatenation of the domain name label and the regionalized DNS zone make up the fully qualified domain name associated with the public IP address. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system.
        :param str fqdn: Gets the FQDN, Fully qualified domain name of the A DNS record associated with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS zone.
        :param str reverse_fqdn: Gets or Sets the Reverse FQDN. A user-visible, fully qualified domain name that resolves to this public IP address. If the reverseFqdn is specified, then a PTR DNS record is created pointing from the IP address in the in-addr.arpa domain to the reverse FQDN.
        """
        # Every field is optional; record only what was supplied.
        for key, value in (("domain_name_label", domain_name_label),
                           ("fqdn", fqdn),
                           ("reverse_fqdn", reverse_fqdn)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="domainNameLabel")
    def domain_name_label(self) -> Optional[str]:
        """
        Domain name label; concatenated with the regionalized DNS zone it
        forms the public IP's FQDN. When set, an A record is created in the
        Microsoft Azure DNS system.
        """
        return pulumi.get(self, "domain_name_label")

    @property
    @pulumi.getter
    def fqdn(self) -> Optional[str]:
        """
        Fully qualified domain name of the A record associated with the
        public IP (domainNameLabel + regionalized DNS zone).
        """
        return pulumi.get(self, "fqdn")

    @property
    @pulumi.getter(name="reverseFqdn")
    def reverse_fqdn(self) -> Optional[str]:
        """
        User-visible FQDN that resolves to this public IP. When specified, a
        PTR record is created from the in-addr.arpa address to this name.
        """
        return pulumi.get(self, "reverse_fqdn")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PublicIPAddressResponse(dict):
    """
    Public IP address resource.
    """
    def __init__(__self__, *,
                 ip_configuration: 'outputs.IPConfigurationResponse',
                 name: str,
                 type: str,
                 ddos_settings: Optional['outputs.DdosSettingsResponse'] = None,
                 dns_settings: Optional['outputs.PublicIPAddressDnsSettingsResponse'] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 idle_timeout_in_minutes: Optional[int] = None,
                 ip_address: Optional[str] = None,
                 ip_tags: Optional[Sequence['outputs.IpTagResponse']] = None,
                 location: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 public_ip_address_version: Optional[str] = None,
                 public_ip_allocation_method: Optional[str] = None,
                 public_ip_prefix: Optional['outputs.SubResourceResponse'] = None,
                 resource_guid: Optional[str] = None,
                 sku: Optional['outputs.PublicIPAddressSkuResponse'] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 zones: Optional[Sequence[str]] = None):
        """
        Public IP address resource.
        :param 'IPConfigurationResponseArgs' ip_configuration: The IP configuration associated with the public IP address.
        :param str name: Resource name.
        :param str type: Resource type.
        :param 'DdosSettingsResponseArgs' ddos_settings: The DDoS protection custom policy associated with the public IP address.
        :param 'PublicIPAddressDnsSettingsResponseArgs' dns_settings: The FQDN of the DNS record associated with the public IP address.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param int idle_timeout_in_minutes: The idle timeout of the public IP address.
        :param str ip_address: The IP address associated with the public IP address resource.
        :param Sequence['IpTagResponseArgs'] ip_tags: The list of tags associated with the public IP address.
        :param str location: Resource location.
        :param str provisioning_state: The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str public_ip_address_version: The public IP address version.
        :param str public_ip_allocation_method: The public IP address allocation method.
        :param 'SubResourceResponseArgs' public_ip_prefix: The Public IP Prefix this Public IP Address should be allocated from.
        :param str resource_guid: The resource GUID property of the public IP resource.
        :param 'PublicIPAddressSkuResponseArgs' sku: The public IP address SKU.
        :param Mapping[str, str] tags: Resource tags.
        :param Sequence[str] zones: A list of availability zones denoting the IP allocated for the resource needs to come from.
        """
        # Required properties are always stored.
        pulumi.set(__self__, "ip_configuration", ip_configuration)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "type", type)
        # Optional properties are recorded only when a value was supplied.
        if ddos_settings is not None:
            pulumi.set(__self__, "ddos_settings", ddos_settings)
        if dns_settings is not None:
            pulumi.set(__self__, "dns_settings", dns_settings)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if idle_timeout_in_minutes is not None:
            pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if ip_tags is not None:
            pulumi.set(__self__, "ip_tags", ip_tags)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if public_ip_address_version is not None:
            pulumi.set(__self__, "public_ip_address_version", public_ip_address_version)
        if public_ip_allocation_method is not None:
            pulumi.set(__self__, "public_ip_allocation_method", public_ip_allocation_method)
        if public_ip_prefix is not None:
            pulumi.set(__self__, "public_ip_prefix", public_ip_prefix)
        if resource_guid is not None:
            pulumi.set(__self__, "resource_guid", resource_guid)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if zones is not None:
            pulumi.set(__self__, "zones", zones)

    @property
    @pulumi.getter(name="ipConfiguration")
    def ip_configuration(self) -> 'outputs.IPConfigurationResponse':
        """
        The IP configuration associated with the public IP address.
        """
        # Required nested output type; wire name is camelCase "ipConfiguration".
        return pulumi.get(self, "ip_configuration")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="ddosSettings")
    def ddos_settings(self) -> Optional['outputs.DdosSettingsResponse']:
        """
        The DDoS protection custom policy associated with the public IP address.
        """
        return pulumi.get(self, "ddos_settings")

    @property
    @pulumi.getter(name="dnsSettings")
    def dns_settings(self) -> Optional['outputs.PublicIPAddressDnsSettingsResponse']:
        """
        The FQDN of the DNS record associated with the public IP address.
        """
        return pulumi.get(self, "dns_settings")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        # Opaque concurrency token maintained by the service.
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[int]:
        """
        The idle timeout of the public IP address.
        """
        return pulumi.get(self, "idle_timeout_in_minutes")

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        """
        The IP address associated with the public IP address resource.
        """
        return pulumi.get(self, "ip_address")

    @property
    @pulumi.getter(name="ipTags")
    def ip_tags(self) -> Optional[Sequence['outputs.IpTagResponse']]:
        """
        The list of tags associated with the public IP address.
        """
        return pulumi.get(self, "ip_tags")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="publicIPAddressVersion")
    def public_ip_address_version(self) -> Optional[str]:
        """
        The public IP address version.
        """
        return pulumi.get(self, "public_ip_address_version")

    @property
    @pulumi.getter(name="publicIPAllocationMethod")
    def public_ip_allocation_method(self) -> Optional[str]:
        """
        The public IP address allocation method.
        """
        return pulumi.get(self, "public_ip_allocation_method")

    @property
    @pulumi.getter(name="publicIPPrefix")
    def public_ip_prefix(self) -> Optional['outputs.SubResourceResponse']:
        """
        The Public IP Prefix this Public IP Address should be allocated from.
        """
        return pulumi.get(self, "public_ip_prefix")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """
        The resource GUID property of the public IP resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.PublicIPAddressSkuResponse']:
        """
        The public IP address SKU.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def zones(self) -> Optional[Sequence[str]]:
        """
        A list of availability zones denoting the IP allocated for the resource needs to come from.
        """
        return pulumi.get(self, "zones")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case Python name;
        # fall back to the input unchanged when it is not in the table.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PublicIPAddressSkuResponse(dict):
    """
    SKU of a public IP address.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None):
        """
        :param str name: Name of a public IP address SKU.
        """
        # Record the SKU name only when one was supplied.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of a public IP address SKU."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PublicIPPrefixSkuResponse(dict):
    """
    SKU of a public IP prefix.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None):
        """
        :param str name: Name of a public IP prefix SKU.
        """
        # Record the SKU name only when one was supplied.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of a public IP prefix SKU."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RedirectConfigurationResponse(dict):
    """
    Describes a Front Door redirect route.
    """
    def __init__(__self__, *,
                 odata_type: str,
                 custom_fragment: Optional[str] = None,
                 custom_host: Optional[str] = None,
                 custom_path: Optional[str] = None,
                 custom_query_string: Optional[str] = None,
                 redirect_protocol: Optional[str] = None,
                 redirect_type: Optional[str] = None):
        """
        :param str odata_type:
               Expected value is '#Microsoft.Azure.FrontDoor.Models.FrontdoorRedirectConfiguration'.
        :param str custom_fragment: Fragment to add to the redirect URL. Fragment is the part of the URL that comes after #. Do not include the #.
        :param str custom_host: Host to redirect. Leave empty to use the incoming host as the destination host.
        :param str custom_path: The full path to redirect. Path cannot be empty and must start with /. Leave empty to use the incoming path as destination path.
        :param str custom_query_string: The set of query strings to be placed in the redirect URL. Setting this value would replace any existing query string; leave empty to preserve the incoming query string. Query string must be in <key>=<value> format. The first ? and & will be added automatically so do not include them in the front, but do separate multiple query strings with &.
        :param str redirect_protocol: The protocol of the destination to where the traffic is redirected
        :param str redirect_type: The redirect type the rule will use when redirecting traffic.
        """
        # The OData discriminator is fixed for this model: the literal is
        # always stored, regardless of the incoming odata_type argument.
        pulumi.set(__self__, "odata_type", '#Microsoft.Azure.FrontDoor.Models.FrontdoorRedirectConfiguration')
        # Optional fields are recorded only when supplied.
        for key, value in (
                ("custom_fragment", custom_fragment),
                ("custom_host", custom_host),
                ("custom_path", custom_path),
                ("custom_query_string", custom_query_string),
                ("redirect_protocol", redirect_protocol),
                ("redirect_type", redirect_type)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> str:
        """
        Expected value is '#Microsoft.Azure.FrontDoor.Models.FrontdoorRedirectConfiguration'.
        """
        return pulumi.get(self, "odata_type")

    @property
    @pulumi.getter(name="customFragment")
    def custom_fragment(self) -> Optional[str]:
        """Fragment (the part after #, without the #) to add to the redirect URL."""
        return pulumi.get(self, "custom_fragment")

    @property
    @pulumi.getter(name="customHost")
    def custom_host(self) -> Optional[str]:
        """Destination host; empty means the incoming host is used."""
        return pulumi.get(self, "custom_host")

    @property
    @pulumi.getter(name="customPath")
    def custom_path(self) -> Optional[str]:
        """
        Full redirect path (non-empty, must start with /); empty means the
        incoming path is used as the destination path.
        """
        return pulumi.get(self, "custom_path")

    @property
    @pulumi.getter(name="customQueryString")
    def custom_query_string(self) -> Optional[str]:
        """
        Query strings for the redirect URL; setting this replaces any
        existing query string, leaving it empty preserves the incoming one.
        Format is <key>=<value>, multiple entries separated by &; the leading
        ? and & are added automatically.
        """
        return pulumi.get(self, "custom_query_string")

    @property
    @pulumi.getter(name="redirectProtocol")
    def redirect_protocol(self) -> Optional[str]:
        """Protocol of the destination the traffic is redirected to."""
        return pulumi.get(self, "redirect_protocol")

    @property
    @pulumi.getter(name="redirectType")
    def redirect_type(self) -> Optional[str]:
        """Redirect type the rule uses when redirecting traffic."""
        return pulumi.get(self, "redirect_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ReferencedPublicIpAddressResponse(dict):
    """
    Reference to a public IP address.
    """
    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        :param str id: The PublicIPAddress Reference.
        """
        # Record the reference only when an ID was supplied.
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """The PublicIPAddress Reference."""
        return pulumi.get(self, "id")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ResourceNavigationLinkResponse(dict):
    """
    ResourceNavigationLink resource.
    """
    def __init__(__self__, *,
                 etag: str,
                 id: str,
                 provisioning_state: str,
                 type: str,
                 link: Optional[str] = None,
                 linked_resource_type: Optional[str] = None,
                 name: Optional[str] = None):
        """
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str provisioning_state: Provisioning state of the ResourceNavigationLink resource.
        :param str type: Resource type.
        :param str link: Link to the external resource.
        :param str linked_resource_type: Resource type of the linked resource.
        :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        # Required properties are always stored.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # Optional properties are stored only when supplied.
        for key, value in (("link", link),
                           ("linked_resource_type", linked_resource_type),
                           ("name", name)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """Opaque read-only string that changes on every update to the resource."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Provisioning state of the ResourceNavigationLink resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def link(self) -> Optional[str]:
        """Link to the external resource."""
        return pulumi.get(self, "link")

    @property
    @pulumi.getter(name="linkedResourceType")
    def linked_resource_type(self) -> Optional[str]:
        """Resource type of the linked resource."""
        return pulumi.get(self, "linked_resource_type")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name unique within the resource group; usable to access the resource."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name (identity fallback).
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RouteFilterRuleResponse(dict):
    """
    Route Filter Rule Resource.
    """
    def __init__(__self__, *,
                 access: str,
                 communities: Sequence[str],
                 etag: str,
                 provisioning_state: str,
                 route_filter_rule_type: str,
                 id: Optional[str] = None,
                 location: Optional[str] = None,
                 name: Optional[str] = None):
        """
        Route Filter Rule Resource.
        :param str access: The access type of the rule.
        :param Sequence[str] communities: The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020'].
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
        :param str route_filter_rule_type: The rule type of the rule.
        :param str id: Resource ID.
        :param str location: Resource location.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "access", access)
        pulumi.set(__self__, "communities", communities)
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "route_filter_rule_type", route_filter_rule_type)
        # Optional fields are recorded only when a value was supplied.
        if id is not None:
            pulumi.set(__self__, "id", id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def access(self) -> str:
        """
        The access type of the rule.
        """
        return pulumi.get(self, "access")
    @property
    @pulumi.getter
    def communities(self) -> Sequence[str]:
        """
        The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020'].
        """
        return pulumi.get(self, "communities")
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="routeFilterRuleType")
    def route_filter_rule_type(self) -> str:
        """
        The rule type of the rule.
        """
        return pulumi.get(self, "route_filter_rule_type")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    def _translate_property(self, prop):
        # Translate a camelCase wire key to its snake_case attribute name,
        # falling back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RouteResponse(dict):
    """
    Route resource.
    """
    def __init__(__self__, *,
                 next_hop_type: str,
                 address_prefix: Optional[str] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 next_hop_ip_address: Optional[str] = None,
                 provisioning_state: Optional[str] = None):
        """
        Route resource.
        :param str next_hop_type: The type of Azure hop the packet should be sent to.
        :param str address_prefix: The destination CIDR to which the route applies.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str next_hop_ip_address: The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
        :param str provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        # The only required field is always recorded on the output object.
        pulumi.set(__self__, "next_hop_type", next_hop_type)
        # Optional fields are recorded only when a value was supplied.
        if address_prefix is not None:
            pulumi.set(__self__, "address_prefix", address_prefix)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if next_hop_ip_address is not None:
            pulumi.set(__self__, "next_hop_ip_address", next_hop_ip_address)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
    @property
    @pulumi.getter(name="nextHopType")
    def next_hop_type(self) -> str:
        """
        The type of Azure hop the packet should be sent to.
        """
        return pulumi.get(self, "next_hop_type")
    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> Optional[str]:
        """
        The destination CIDR to which the route applies.
        """
        return pulumi.get(self, "address_prefix")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nextHopIpAddress")
    def next_hop_ip_address(self) -> Optional[str]:
        """
        The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
        """
        return pulumi.get(self, "next_hop_ip_address")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    def _translate_property(self, prop):
        # Translate a camelCase wire key to its snake_case attribute name,
        # falling back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RouteTableResponse(dict):
    """
    Route table resource.
    """
    def __init__(__self__, *,
                 name: str,
                 subnets: Sequence['outputs.SubnetResponse'],
                 type: str,
                 disable_bgp_route_propagation: Optional[bool] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 location: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 routes: Optional[Sequence['outputs.RouteResponse']] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        Route table resource.
        :param str name: Resource name.
        :param Sequence['SubnetResponseArgs'] subnets: A collection of references to subnets.
        :param str type: Resource type.
        :param bool disable_bgp_route_propagation: Gets or sets whether to disable the routes learned by BGP on that route table. True means disable.
        :param str etag: Gets a unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str location: Resource location.
        :param str provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param Sequence['RouteResponseArgs'] routes: Collection of routes contained within a route table.
        :param Mapping[str, str] tags: Resource tags.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "subnets", subnets)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when a value was supplied.
        if disable_bgp_route_propagation is not None:
            pulumi.set(__self__, "disable_bgp_route_propagation", disable_bgp_route_propagation)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if routes is not None:
            pulumi.set(__self__, "routes", routes)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def subnets(self) -> Sequence['outputs.SubnetResponse']:
        """
        A collection of references to subnets.
        """
        return pulumi.get(self, "subnets")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="disableBgpRoutePropagation")
    def disable_bgp_route_propagation(self) -> Optional[bool]:
        """
        Gets or sets whether to disable the routes learned by BGP on that route table. True means disable.
        """
        return pulumi.get(self, "disable_bgp_route_propagation")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Gets a unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def routes(self) -> Optional[Sequence['outputs.RouteResponse']]:
        """
        Collection of routes contained within a route table.
        """
        return pulumi.get(self, "routes")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    def _translate_property(self, prop):
        # Translate a camelCase wire key to its snake_case attribute name,
        # falling back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RoutingRuleResponse(dict):
    """
    A routing rule represents a specification for traffic to treat and where to send it, along with health probe information.
    """
    def __init__(__self__, *,
                 resource_state: str,
                 type: str,
                 accepted_protocols: Optional[Sequence[str]] = None,
                 enabled_state: Optional[str] = None,
                 frontend_endpoints: Optional[Sequence['outputs.SubResourceResponse']] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 patterns_to_match: Optional[Sequence[str]] = None,
                 route_configuration: Optional[Any] = None):
        """
        A routing rule represents a specification for traffic to treat and where to send it, along with health probe information.
        :param str resource_state: Resource status.
        :param str type: Resource type.
        :param Sequence[str] accepted_protocols: Protocol schemes to match for this rule
        :param str enabled_state: Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
        :param Sequence['SubResourceResponseArgs'] frontend_endpoints: Frontend endpoints associated with this rule
        :param str id: Resource ID.
        :param str name: Resource name.
        :param Sequence[str] patterns_to_match: The route patterns of the rule.
        :param Union['ForwardingConfigurationResponseArgs', 'RedirectConfigurationResponseArgs'] route_configuration: A reference to the routing configuration.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "resource_state", resource_state)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when a value was supplied.
        if accepted_protocols is not None:
            pulumi.set(__self__, "accepted_protocols", accepted_protocols)
        if enabled_state is not None:
            pulumi.set(__self__, "enabled_state", enabled_state)
        if frontend_endpoints is not None:
            pulumi.set(__self__, "frontend_endpoints", frontend_endpoints)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if patterns_to_match is not None:
            pulumi.set(__self__, "patterns_to_match", patterns_to_match)
        # route_configuration is typed Any; per the docstring it is one of two
        # response configuration variants.
        if route_configuration is not None:
            pulumi.set(__self__, "route_configuration", route_configuration)
    @property
    @pulumi.getter(name="resourceState")
    def resource_state(self) -> str:
        """
        Resource status.
        """
        return pulumi.get(self, "resource_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="acceptedProtocols")
    def accepted_protocols(self) -> Optional[Sequence[str]]:
        """
        Protocol schemes to match for this rule
        """
        return pulumi.get(self, "accepted_protocols")
    @property
    @pulumi.getter(name="enabledState")
    def enabled_state(self) -> Optional[str]:
        """
        Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
        """
        return pulumi.get(self, "enabled_state")
    @property
    @pulumi.getter(name="frontendEndpoints")
    def frontend_endpoints(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
        """
        Frontend endpoints associated with this rule
        """
        return pulumi.get(self, "frontend_endpoints")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="patternsToMatch")
    def patterns_to_match(self) -> Optional[Sequence[str]]:
        """
        The route patterns of the rule.
        """
        return pulumi.get(self, "patterns_to_match")
    @property
    @pulumi.getter(name="routeConfiguration")
    def route_configuration(self) -> Optional[Any]:
        """
        A reference to the routing configuration.
        """
        return pulumi.get(self, "route_configuration")
    def _translate_property(self, prop):
        # Translate a camelCase wire key to its snake_case attribute name,
        # falling back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SecurityRuleResponse(dict):
    """
    Network security rule.
    """
    def __init__(__self__, *,
                 access: str,
                 direction: str,
                 protocol: str,
                 description: Optional[str] = None,
                 destination_address_prefix: Optional[str] = None,
                 destination_address_prefixes: Optional[Sequence[str]] = None,
                 destination_application_security_groups: Optional[Sequence['outputs.ApplicationSecurityGroupResponse']] = None,
                 destination_port_range: Optional[str] = None,
                 destination_port_ranges: Optional[Sequence[str]] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 priority: Optional[int] = None,
                 provisioning_state: Optional[str] = None,
                 source_address_prefix: Optional[str] = None,
                 source_address_prefixes: Optional[Sequence[str]] = None,
                 source_application_security_groups: Optional[Sequence['outputs.ApplicationSecurityGroupResponse']] = None,
                 source_port_range: Optional[str] = None,
                 source_port_ranges: Optional[Sequence[str]] = None):
        """
        Network security rule.
        :param str access: The network traffic is allowed or denied.
        :param str direction: The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic.
        :param str protocol: Network protocol this rule applies to.
        :param str description: A description for this rule. Restricted to 140 chars.
        :param str destination_address_prefix: The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        :param Sequence[str] destination_address_prefixes: The destination address prefixes. CIDR or destination IP ranges.
        :param Sequence['ApplicationSecurityGroupResponseArgs'] destination_application_security_groups: The application security group specified as destination.
        :param str destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
        :param Sequence[str] destination_port_ranges: The destination port ranges.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param int priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
        :param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
        :param Sequence[str] source_address_prefixes: The CIDR or source IP ranges.
        :param Sequence['ApplicationSecurityGroupResponseArgs'] source_application_security_groups: The application security group specified as source.
        :param str source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
        :param Sequence[str] source_port_ranges: The source port ranges.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "access", access)
        pulumi.set(__self__, "direction", direction)
        pulumi.set(__self__, "protocol", protocol)
        # Optional fields are recorded only when a value was supplied.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if destination_address_prefix is not None:
            pulumi.set(__self__, "destination_address_prefix", destination_address_prefix)
        if destination_address_prefixes is not None:
            pulumi.set(__self__, "destination_address_prefixes", destination_address_prefixes)
        if destination_application_security_groups is not None:
            pulumi.set(__self__, "destination_application_security_groups", destination_application_security_groups)
        if destination_port_range is not None:
            pulumi.set(__self__, "destination_port_range", destination_port_range)
        if destination_port_ranges is not None:
            pulumi.set(__self__, "destination_port_ranges", destination_port_ranges)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if priority is not None:
            pulumi.set(__self__, "priority", priority)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if source_address_prefix is not None:
            pulumi.set(__self__, "source_address_prefix", source_address_prefix)
        if source_address_prefixes is not None:
            pulumi.set(__self__, "source_address_prefixes", source_address_prefixes)
        if source_application_security_groups is not None:
            pulumi.set(__self__, "source_application_security_groups", source_application_security_groups)
        if source_port_range is not None:
            pulumi.set(__self__, "source_port_range", source_port_range)
        if source_port_ranges is not None:
            pulumi.set(__self__, "source_port_ranges", source_port_ranges)
    @property
    @pulumi.getter
    def access(self) -> str:
        """
        The network traffic is allowed or denied.
        """
        return pulumi.get(self, "access")
    @property
    @pulumi.getter
    def direction(self) -> str:
        """
        The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic.
        """
        return pulumi.get(self, "direction")
    @property
    @pulumi.getter
    def protocol(self) -> str:
        """
        Network protocol this rule applies to.
        """
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        A description for this rule. Restricted to 140 chars.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="destinationAddressPrefix")
    def destination_address_prefix(self) -> Optional[str]:
        """
        The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        """
        return pulumi.get(self, "destination_address_prefix")
    @property
    @pulumi.getter(name="destinationAddressPrefixes")
    def destination_address_prefixes(self) -> Optional[Sequence[str]]:
        """
        The destination address prefixes. CIDR or destination IP ranges.
        """
        return pulumi.get(self, "destination_address_prefixes")
    @property
    @pulumi.getter(name="destinationApplicationSecurityGroups")
    def destination_application_security_groups(self) -> Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]:
        """
        The application security group specified as destination.
        """
        return pulumi.get(self, "destination_application_security_groups")
    @property
    @pulumi.getter(name="destinationPortRange")
    def destination_port_range(self) -> Optional[str]:
        """
        The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
        """
        return pulumi.get(self, "destination_port_range")
    @property
    @pulumi.getter(name="destinationPortRanges")
    def destination_port_ranges(self) -> Optional[Sequence[str]]:
        """
        The destination port ranges.
        """
        return pulumi.get(self, "destination_port_ranges")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def priority(self) -> Optional[int]:
        """
        The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
        """
        return pulumi.get(self, "priority")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="sourceAddressPrefix")
    def source_address_prefix(self) -> Optional[str]:
        """
        The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
        """
        return pulumi.get(self, "source_address_prefix")
    @property
    @pulumi.getter(name="sourceAddressPrefixes")
    def source_address_prefixes(self) -> Optional[Sequence[str]]:
        """
        The CIDR or source IP ranges.
        """
        return pulumi.get(self, "source_address_prefixes")
    @property
    @pulumi.getter(name="sourceApplicationSecurityGroups")
    def source_application_security_groups(self) -> Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]:
        """
        The application security group specified as source.
        """
        return pulumi.get(self, "source_application_security_groups")
    @property
    @pulumi.getter(name="sourcePortRange")
    def source_port_range(self) -> Optional[str]:
        """
        The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
        """
        return pulumi.get(self, "source_port_range")
    @property
    @pulumi.getter(name="sourcePortRanges")
    def source_port_ranges(self) -> Optional[Sequence[str]]:
        """
        The source port ranges.
        """
        return pulumi.get(self, "source_port_ranges")
    def _translate_property(self, prop):
        # Translate a camelCase wire key to its snake_case attribute name,
        # falling back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceAssociationLinkResponse(dict):
    """
    ServiceAssociationLink resource.
    """
    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 allow_delete: Optional[bool] = None,
                 id: Optional[str] = None,
                 link: Optional[str] = None,
                 linked_resource_type: Optional[str] = None,
                 locations: Optional[Sequence[str]] = None,
                 name: Optional[str] = None,
                 type: Optional[str] = None):
        """
        ServiceAssociationLink resource.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str provisioning_state: Provisioning state of the ServiceAssociationLink resource.
        :param bool allow_delete: If true, the resource can be deleted.
        :param str id: Resource ID.
        :param str link: Link to the external resource.
        :param str linked_resource_type: Resource type of the linked resource.
        :param Sequence[str] locations: A list of locations.
        :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str type: Resource type.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are recorded only when a value was supplied.
        if allow_delete is not None:
            pulumi.set(__self__, "allow_delete", allow_delete)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if link is not None:
            pulumi.set(__self__, "link", link)
        if linked_resource_type is not None:
            pulumi.set(__self__, "linked_resource_type", linked_resource_type)
        if locations is not None:
            pulumi.set(__self__, "locations", locations)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state of the ServiceAssociationLink resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="allowDelete")
    def allow_delete(self) -> Optional[bool]:
        """
        If true, the resource can be deleted.
        """
        return pulumi.get(self, "allow_delete")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def link(self) -> Optional[str]:
        """
        Link to the external resource.
        """
        return pulumi.get(self, "link")
    @property
    @pulumi.getter(name="linkedResourceType")
    def linked_resource_type(self) -> Optional[str]:
        """
        Resource type of the linked resource.
        """
        return pulumi.get(self, "linked_resource_type")
    @property
    @pulumi.getter
    def locations(self) -> Optional[Sequence[str]]:
        """
        A list of locations.
        """
        return pulumi.get(self, "locations")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    def _translate_property(self, prop):
        # Translate a camelCase wire key to its snake_case attribute name,
        # falling back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceEndpointPolicyDefinitionResponse(dict):
    """
    Service Endpoint policy definitions.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 description: Optional[str] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 service: Optional[str] = None,
                 service_resources: Optional[Sequence[str]] = None):
        """
        Service Endpoint policy definitions.
        :param str provisioning_state: The provisioning state of the service end point policy definition. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str description: A description for this rule. Restricted to 140 chars.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str service: Service endpoint name.
        :param Sequence[str] service_resources: A list of service resources.
        """
        # The only required field is always recorded on the output object.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are recorded only when a value was supplied.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if service is not None:
            pulumi.set(__self__, "service", service)
        if service_resources is not None:
            pulumi.set(__self__, "service_resources", service_resources)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the service end point policy definition. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        A description for this rule. Restricted to 140 chars.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def service(self) -> Optional[str]:
        """
        Service endpoint name.
        """
        return pulumi.get(self, "service")
    @property
    @pulumi.getter(name="serviceResources")
    def service_resources(self) -> Optional[Sequence[str]]:
        """
        A list of service resources.
        """
        return pulumi.get(self, "service_resources")
    def _translate_property(self, prop):
        # Translate a camelCase wire key to its snake_case attribute name,
        # falling back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceEndpointPolicyResponse(dict):
    """
    Service End point policy resource.
    """
    def __init__(__self__, *,
                 name: str,
                 provisioning_state: str,
                 resource_guid: str,
                 subnets: Sequence['outputs.SubnetResponse'],
                 type: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 location: Optional[str] = None,
                 service_endpoint_policy_definitions: Optional[Sequence['outputs.ServiceEndpointPolicyDefinitionResponse']] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        Service End point policy resource.
        :param str name: Resource name.
        :param str provisioning_state: The provisioning state of the service endpoint policy. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str resource_guid: The resource GUID property of the service endpoint policy resource.
        :param Sequence['SubnetResponseArgs'] subnets: A collection of references to subnets.
        :param str type: Resource type.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str location: Resource location.
        :param Sequence['ServiceEndpointPolicyDefinitionResponseArgs'] service_endpoint_policy_definitions: A collection of service endpoint policy definitions of the service endpoint policy.
        :param Mapping[str, str] tags: Resource tags.
        """
        # Required fields are always recorded on the output object.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "resource_guid", resource_guid)
        pulumi.set(__self__, "subnets", subnets)
        pulumi.set(__self__, "type", type)
        # Optional fields are recorded only when a value was supplied.
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if service_endpoint_policy_definitions is not None:
            pulumi.set(__self__, "service_endpoint_policy_definitions", service_endpoint_policy_definitions)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the service endpoint policy. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        The resource GUID property of the service endpoint policy resource.
        """
        return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter
    def subnets(self) -> Sequence['outputs.SubnetResponse']:
        """
        A collection of references to subnets.
        """
        return pulumi.get(self, "subnets")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="serviceEndpointPolicyDefinitions")
    def service_endpoint_policy_definitions(self) -> Optional[Sequence['outputs.ServiceEndpointPolicyDefinitionResponse']]:
        """
        A collection of service endpoint policy definitions of the service endpoint policy.
        """
        return pulumi.get(self, "service_endpoint_policy_definitions")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    def _translate_property(self, prop):
        # Translate a camelCase wire key to its snake_case attribute name,
        # falling back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceEndpointPropertiesFormatResponse(dict):
    """
    The service endpoint properties.
    """
    def __init__(__self__, *,
                 locations: Optional[Sequence[str]] = None,
                 provisioning_state: Optional[str] = None,
                 service: Optional[str] = None):
        """
        The service endpoint properties.

        :param Sequence[str] locations: A list of locations.
        :param str provisioning_state: The provisioning state of the resource.
        :param str service: The type of the endpoint service.
        """
        # Every property is optional; record only those actually supplied.
        for key, value in (("locations", locations),
                           ("provisioning_state", provisioning_state),
                           ("service", service)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def locations(self) -> Optional[Sequence[str]]:
        """A list of locations."""
        return pulumi.get(self, "locations")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def service(self) -> Optional[str]:
        """The type of the endpoint service."""
        return pulumi.get(self, "service")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class SubResourceResponse(dict):
    """
    Reference to another subresource.
    """
    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        Reference to another subresource.

        :param str id: Resource ID.
        """
        # The only property is optional; record it when supplied.
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class SubnetResponse(dict):
    """
    Subnet in a virtual network resource.
    """
    def __init__(__self__, *,
                 ip_configuration_profiles: Sequence['outputs.IPConfigurationProfileResponse'],
                 ip_configurations: Sequence['outputs.IPConfigurationResponse'],
                 private_endpoints: Sequence['outputs.PrivateEndpointResponse'],
                 purpose: str,
                 address_prefix: Optional[str] = None,
                 address_prefixes: Optional[Sequence[str]] = None,
                 delegations: Optional[Sequence['outputs.DelegationResponse']] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 nat_gateway: Optional['outputs.SubResourceResponse'] = None,
                 network_security_group: Optional['outputs.NetworkSecurityGroupResponse'] = None,
                 private_endpoint_network_policies: Optional[str] = None,
                 private_link_service_network_policies: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 resource_navigation_links: Optional[Sequence['outputs.ResourceNavigationLinkResponse']] = None,
                 route_table: Optional['outputs.RouteTableResponse'] = None,
                 service_association_links: Optional[Sequence['outputs.ServiceAssociationLinkResponse']] = None,
                 service_endpoint_policies: Optional[Sequence['outputs.ServiceEndpointPolicyResponse']] = None,
                 service_endpoints: Optional[Sequence['outputs.ServiceEndpointPropertiesFormatResponse']] = None):
        """
        Subnet in a virtual network resource.

        :param Sequence['IPConfigurationProfileResponseArgs'] ip_configuration_profiles: Array of IP configuration profiles which reference this subnet.
        :param Sequence['IPConfigurationResponseArgs'] ip_configurations: Gets an array of references to the network interface IP configurations using subnet.
        :param Sequence['PrivateEndpointResponseArgs'] private_endpoints: An array of references to private endpoints.
        :param str purpose: A read-only string identifying the intention of use for this subnet based on delegations and other user-defined properties.
        :param str address_prefix: The address prefix for the subnet.
        :param Sequence[str] address_prefixes: List of address prefixes for the subnet.
        :param Sequence['DelegationResponseArgs'] delegations: Gets an array of references to the delegations on the subnet.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param 'SubResourceResponseArgs' nat_gateway: Nat gateway associated with this subnet.
        :param 'NetworkSecurityGroupResponseArgs' network_security_group: The reference of the NetworkSecurityGroup resource.
        :param str private_endpoint_network_policies: Enable or Disable private end point on the subnet.
        :param str private_link_service_network_policies: Enable or Disable private link service on the subnet.
        :param str provisioning_state: The provisioning state of the resource.
        :param Sequence['ResourceNavigationLinkResponseArgs'] resource_navigation_links: Gets an array of references to the external resources using subnet.
        :param 'RouteTableResponseArgs' route_table: The reference of the RouteTable resource.
        :param Sequence['ServiceAssociationLinkResponseArgs'] service_association_links: Gets an array of references to services injecting into this subnet.
        :param Sequence['ServiceEndpointPolicyResponseArgs'] service_endpoint_policies: An array of service endpoint policies.
        :param Sequence['ServiceEndpointPropertiesFormatResponseArgs'] service_endpoints: An array of service endpoints.
        """
        # Required output properties are always recorded.
        for key, value in (("ip_configuration_profiles", ip_configuration_profiles),
                           ("ip_configurations", ip_configurations),
                           ("private_endpoints", private_endpoints),
                           ("purpose", purpose)):
            pulumi.set(__self__, key, value)
        # Optional output properties are recorded only when supplied.
        for key, value in (("address_prefix", address_prefix),
                           ("address_prefixes", address_prefixes),
                           ("delegations", delegations),
                           ("etag", etag),
                           ("id", id),
                           ("name", name),
                           ("nat_gateway", nat_gateway),
                           ("network_security_group", network_security_group),
                           ("private_endpoint_network_policies", private_endpoint_network_policies),
                           ("private_link_service_network_policies", private_link_service_network_policies),
                           ("provisioning_state", provisioning_state),
                           ("resource_navigation_links", resource_navigation_links),
                           ("route_table", route_table),
                           ("service_association_links", service_association_links),
                           ("service_endpoint_policies", service_endpoint_policies),
                           ("service_endpoints", service_endpoints)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="ipConfigurationProfiles")
    def ip_configuration_profiles(self) -> Sequence['outputs.IPConfigurationProfileResponse']:
        """Array of IP configuration profiles which reference this subnet."""
        return pulumi.get(self, "ip_configuration_profiles")

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Sequence['outputs.IPConfigurationResponse']:
        """Gets an array of references to the network interface IP configurations using subnet."""
        return pulumi.get(self, "ip_configurations")

    @property
    @pulumi.getter(name="privateEndpoints")
    def private_endpoints(self) -> Sequence['outputs.PrivateEndpointResponse']:
        """An array of references to private endpoints."""
        return pulumi.get(self, "private_endpoints")

    @property
    @pulumi.getter
    def purpose(self) -> str:
        """A read-only string identifying the intention of use for this subnet based on delegations and other user-defined properties."""
        return pulumi.get(self, "purpose")

    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> Optional[str]:
        """The address prefix for the subnet."""
        return pulumi.get(self, "address_prefix")

    @property
    @pulumi.getter(name="addressPrefixes")
    def address_prefixes(self) -> Optional[Sequence[str]]:
        """List of address prefixes for the subnet."""
        return pulumi.get(self, "address_prefixes")

    @property
    @pulumi.getter
    def delegations(self) -> Optional[Sequence['outputs.DelegationResponse']]:
        """Gets an array of references to the delegations on the subnet."""
        return pulumi.get(self, "delegations")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="natGateway")
    def nat_gateway(self) -> Optional['outputs.SubResourceResponse']:
        """Nat gateway associated with this subnet."""
        return pulumi.get(self, "nat_gateway")

    @property
    @pulumi.getter(name="networkSecurityGroup")
    def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']:
        """The reference of the NetworkSecurityGroup resource."""
        return pulumi.get(self, "network_security_group")

    @property
    @pulumi.getter(name="privateEndpointNetworkPolicies")
    def private_endpoint_network_policies(self) -> Optional[str]:
        """Enable or Disable private end point on the subnet."""
        return pulumi.get(self, "private_endpoint_network_policies")

    @property
    @pulumi.getter(name="privateLinkServiceNetworkPolicies")
    def private_link_service_network_policies(self) -> Optional[str]:
        """Enable or Disable private link service on the subnet."""
        return pulumi.get(self, "private_link_service_network_policies")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceNavigationLinks")
    def resource_navigation_links(self) -> Optional[Sequence['outputs.ResourceNavigationLinkResponse']]:
        """Gets an array of references to the external resources using subnet."""
        return pulumi.get(self, "resource_navigation_links")

    @property
    @pulumi.getter(name="routeTable")
    def route_table(self) -> Optional['outputs.RouteTableResponse']:
        """The reference of the RouteTable resource."""
        return pulumi.get(self, "route_table")

    @property
    @pulumi.getter(name="serviceAssociationLinks")
    def service_association_links(self) -> Optional[Sequence['outputs.ServiceAssociationLinkResponse']]:
        """Gets an array of references to services injecting into this subnet."""
        return pulumi.get(self, "service_association_links")

    @property
    @pulumi.getter(name="serviceEndpointPolicies")
    def service_endpoint_policies(self) -> Optional[Sequence['outputs.ServiceEndpointPolicyResponse']]:
        """An array of service endpoint policies."""
        return pulumi.get(self, "service_endpoint_policies")

    @property
    @pulumi.getter(name="serviceEndpoints")
    def service_endpoints(self) -> Optional[Sequence['outputs.ServiceEndpointPropertiesFormatResponse']]:
        """An array of service endpoints."""
        return pulumi.get(self, "service_endpoints")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class TunnelConnectionHealthResponse(dict):
    """
    VirtualNetworkGatewayConnection properties.
    """
    def __init__(__self__, *,
                 connection_status: str,
                 egress_bytes_transferred: float,
                 ingress_bytes_transferred: float,
                 last_connection_established_utc_time: str,
                 tunnel: str):
        """
        VirtualNetworkGatewayConnection properties.

        :param str connection_status: Virtual Network Gateway connection status.
        :param float egress_bytes_transferred: The Egress Bytes Transferred in this connection.
        :param float ingress_bytes_transferred: The Ingress Bytes Transferred in this connection.
        :param str last_connection_established_utc_time: The time at which connection was established in Utc format.
        :param str tunnel: Tunnel name.
        """
        # All properties are required on this output type.
        for key, value in (("connection_status", connection_status),
                           ("egress_bytes_transferred", egress_bytes_transferred),
                           ("ingress_bytes_transferred", ingress_bytes_transferred),
                           ("last_connection_established_utc_time", last_connection_established_utc_time),
                           ("tunnel", tunnel)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="connectionStatus")
    def connection_status(self) -> str:
        """Virtual Network Gateway connection status."""
        return pulumi.get(self, "connection_status")

    @property
    @pulumi.getter(name="egressBytesTransferred")
    def egress_bytes_transferred(self) -> float:
        """The Egress Bytes Transferred in this connection."""
        return pulumi.get(self, "egress_bytes_transferred")

    @property
    @pulumi.getter(name="ingressBytesTransferred")
    def ingress_bytes_transferred(self) -> float:
        """The Ingress Bytes Transferred in this connection."""
        return pulumi.get(self, "ingress_bytes_transferred")

    @property
    @pulumi.getter(name="lastConnectionEstablishedUtcTime")
    def last_connection_established_utc_time(self) -> str:
        """The time at which connection was established in Utc format."""
        return pulumi.get(self, "last_connection_established_utc_time")

    @property
    @pulumi.getter
    def tunnel(self) -> str:
        """Tunnel name."""
        return pulumi.get(self, "tunnel")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class VirtualHubIdResponse(dict):
    """
    Virtual Hub identifier.
    """
    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        Virtual Hub identifier.

        :param str id: The resource URI for the Virtual Hub where the ExpressRoute gateway is or will be deployed. The Virtual Hub resource and the ExpressRoute gateway resource reside in the same subscription.
        """
        # The only property is optional; record it when supplied.
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """The resource URI for the Virtual Hub where the ExpressRoute gateway is or will be deployed. The Virtual Hub resource and the ExpressRoute gateway resource reside in the same subscription."""
        return pulumi.get(self, "id")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class VirtualHubRouteResponse(dict):
    """
    VirtualHub route.
    """
    def __init__(__self__, *,
                 address_prefixes: Optional[Sequence[str]] = None,
                 next_hop_ip_address: Optional[str] = None):
        """
        VirtualHub route.

        :param Sequence[str] address_prefixes: List of all addressPrefixes.
        :param str next_hop_ip_address: NextHop ip address.
        """
        # Both properties are optional; record only those actually supplied.
        for key, value in (("address_prefixes", address_prefixes),
                           ("next_hop_ip_address", next_hop_ip_address)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="addressPrefixes")
    def address_prefixes(self) -> Optional[Sequence[str]]:
        """List of all addressPrefixes."""
        return pulumi.get(self, "address_prefixes")

    @property
    @pulumi.getter(name="nextHopIpAddress")
    def next_hop_ip_address(self) -> Optional[str]:
        """NextHop ip address."""
        return pulumi.get(self, "next_hop_ip_address")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class VirtualHubRouteTableResponse(dict):
    """
    VirtualHub route table.
    """
    def __init__(__self__, *,
                 routes: Optional[Sequence['outputs.VirtualHubRouteResponse']] = None):
        """
        VirtualHub route table.

        :param Sequence['VirtualHubRouteResponseArgs'] routes: List of all routes.
        """
        # The only property is optional; record it when supplied.
        if routes is not None:
            pulumi.set(__self__, "routes", routes)

    @property
    @pulumi.getter
    def routes(self) -> Optional[Sequence['outputs.VirtualHubRouteResponse']]:
        """List of all routes."""
        return pulumi.get(self, "routes")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class VirtualNetworkGatewayIPConfigurationResponse(dict):
    """
    IP configuration for virtual network gateway.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 private_ip_allocation_method: Optional[str] = None,
                 public_ip_address: Optional['outputs.SubResourceResponse'] = None,
                 subnet: Optional['outputs.SubResourceResponse'] = None):
        """
        IP configuration for virtual network gateway.

        :param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str private_ip_allocation_method: The private IP address allocation method.
        :param 'SubResourceResponseArgs' public_ip_address: The reference of the public IP resource.
        :param 'SubResourceResponseArgs' subnet: The reference of the subnet resource.
        """
        # The only required output property.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional output properties are recorded only when supplied.
        for key, value in (("etag", etag),
                           ("id", id),
                           ("name", name),
                           ("private_ip_allocation_method", private_ip_allocation_method),
                           ("public_ip_address", public_ip_address),
                           ("subnet", subnet)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the resource that is unique within a resource group. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="privateIPAllocationMethod")
    def private_ip_allocation_method(self) -> Optional[str]:
        """The private IP address allocation method."""
        return pulumi.get(self, "private_ip_allocation_method")

    @property
    @pulumi.getter(name="publicIPAddress")
    def public_ip_address(self) -> Optional['outputs.SubResourceResponse']:
        """The reference of the public IP resource."""
        return pulumi.get(self, "public_ip_address")

    @property
    @pulumi.getter
    def subnet(self) -> Optional['outputs.SubResourceResponse']:
        """The reference of the subnet resource."""
        return pulumi.get(self, "subnet")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class VirtualNetworkGatewayResponse(dict):
    """
    A common class for general resource information.
    """
    def __init__(__self__, *,
                 name: str,
                 provisioning_state: str,
                 type: str,
                 active_active: Optional[bool] = None,
                 bgp_settings: Optional['outputs.BgpSettingsResponse'] = None,
                 custom_routes: Optional['outputs.AddressSpaceResponse'] = None,
                 enable_bgp: Optional[bool] = None,
                 etag: Optional[str] = None,
                 gateway_default_site: Optional['outputs.SubResourceResponse'] = None,
                 gateway_type: Optional[str] = None,
                 id: Optional[str] = None,
                 ip_configurations: Optional[Sequence['outputs.VirtualNetworkGatewayIPConfigurationResponse']] = None,
                 location: Optional[str] = None,
                 resource_guid: Optional[str] = None,
                 sku: Optional['outputs.VirtualNetworkGatewaySkuResponse'] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 vpn_client_configuration: Optional['outputs.VpnClientConfigurationResponse'] = None,
                 vpn_type: Optional[str] = None):
        """
        A common class for general resource information.

        :param str name: Resource name.
        :param str provisioning_state: The provisioning state of the VirtualNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str type: Resource type.
        :param bool active_active: ActiveActive flag.
        :param 'BgpSettingsResponseArgs' bgp_settings: Virtual network gateway's BGP speaker settings.
        :param 'AddressSpaceResponseArgs' custom_routes: The reference of the address space resource which represents the custom routes address space specified by the customer for virtual network gateway and VpnClient.
        :param bool enable_bgp: Whether BGP is enabled for this virtual network gateway or not.
        :param str etag: Gets a unique read-only string that changes whenever the resource is updated.
        :param 'SubResourceResponseArgs' gateway_default_site: The reference of the LocalNetworkGateway resource which represents local network site having default routes. Assign Null value in case of removing existing default site setting.
        :param str gateway_type: The type of this virtual network gateway.
        :param str id: Resource ID.
        :param Sequence['VirtualNetworkGatewayIPConfigurationResponseArgs'] ip_configurations: IP configurations for virtual network gateway.
        :param str location: Resource location.
        :param str resource_guid: The resource GUID property of the VirtualNetworkGateway resource.
        :param 'VirtualNetworkGatewaySkuResponseArgs' sku: The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway.
        :param Mapping[str, str] tags: Resource tags.
        :param 'VpnClientConfigurationResponseArgs' vpn_client_configuration: The reference of the VpnClientConfiguration resource which represents the P2S VpnClient configurations.
        :param str vpn_type: The type of this virtual network gateway.
        """
        # Required output properties are always recorded.
        for key, value in (("name", name),
                           ("provisioning_state", provisioning_state),
                           ("type", type)):
            pulumi.set(__self__, key, value)
        # Optional output properties are recorded only when supplied.
        for key, value in (("active_active", active_active),
                           ("bgp_settings", bgp_settings),
                           ("custom_routes", custom_routes),
                           ("enable_bgp", enable_bgp),
                           ("etag", etag),
                           ("gateway_default_site", gateway_default_site),
                           ("gateway_type", gateway_type),
                           ("id", id),
                           ("ip_configurations", ip_configurations),
                           ("location", location),
                           ("resource_guid", resource_guid),
                           ("sku", sku),
                           ("tags", tags),
                           ("vpn_client_configuration", vpn_client_configuration),
                           ("vpn_type", vpn_type)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """Resource name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the VirtualNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="activeActive")
    def active_active(self) -> Optional[bool]:
        """ActiveActive flag."""
        return pulumi.get(self, "active_active")

    @property
    @pulumi.getter(name="bgpSettings")
    def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']:
        """Virtual network gateway's BGP speaker settings."""
        return pulumi.get(self, "bgp_settings")

    @property
    @pulumi.getter(name="customRoutes")
    def custom_routes(self) -> Optional['outputs.AddressSpaceResponse']:
        """The reference of the address space resource which represents the custom routes address space specified by the customer for virtual network gateway and VpnClient."""
        return pulumi.get(self, "custom_routes")

    @property
    @pulumi.getter(name="enableBgp")
    def enable_bgp(self) -> Optional[bool]:
        """Whether BGP is enabled for this virtual network gateway or not."""
        return pulumi.get(self, "enable_bgp")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Gets a unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="gatewayDefaultSite")
    def gateway_default_site(self) -> Optional['outputs.SubResourceResponse']:
        """The reference of the LocalNetworkGateway resource which represents local network site having default routes. Assign Null value in case of removing existing default site setting."""
        return pulumi.get(self, "gateway_default_site")

    @property
    @pulumi.getter(name="gatewayType")
    def gateway_type(self) -> Optional[str]:
        """The type of this virtual network gateway."""
        return pulumi.get(self, "gateway_type")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Optional[Sequence['outputs.VirtualNetworkGatewayIPConfigurationResponse']]:
        """IP configurations for virtual network gateway."""
        return pulumi.get(self, "ip_configurations")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """Resource location."""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """The resource GUID property of the VirtualNetworkGateway resource."""
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.VirtualNetworkGatewaySkuResponse']:
        """The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway."""
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="vpnClientConfiguration")
    def vpn_client_configuration(self) -> Optional['outputs.VpnClientConfigurationResponse']:
        """The reference of the VpnClientConfiguration resource which represents the P2S VpnClient configurations."""
        return pulumi.get(self, "vpn_client_configuration")

    @property
    @pulumi.getter(name="vpnType")
    def vpn_type(self) -> Optional[str]:
        """The type of this virtual network gateway."""
        return pulumi.get(self, "vpn_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class VirtualNetworkGatewaySkuResponse(dict):
    """
    VirtualNetworkGatewaySku details.
    """
    def __init__(__self__, *,
                 capacity: Optional[int] = None,
                 name: Optional[str] = None,
                 tier: Optional[str] = None):
        """
        VirtualNetworkGatewaySku details.

        :param int capacity: The capacity.
        :param str name: Gateway SKU name.
        :param str tier: Gateway SKU tier.
        """
        # Every property is optional; record only those actually supplied.
        for key, value in (("capacity", capacity),
                           ("name", name),
                           ("tier", tier)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def capacity(self) -> Optional[int]:
        """The capacity."""
        return pulumi.get(self, "capacity")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Gateway SKU name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tier(self) -> Optional[str]:
        """Gateway SKU tier."""
        return pulumi.get(self, "tier")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class VirtualNetworkPeeringResponse(dict):
"""
Peerings in a virtual network resource.
"""
def __init__(__self__, *,
allow_forwarded_traffic: Optional[bool] = None,
allow_gateway_transit: Optional[bool] = None,
allow_virtual_network_access: Optional[bool] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
peering_state: Optional[str] = None,
provisioning_state: Optional[str] = None,
remote_address_space: Optional['outputs.AddressSpaceResponse'] = None,
remote_virtual_network: Optional['outputs.SubResourceResponse'] = None,
use_remote_gateways: Optional[bool] = None):
"""
Peerings in a virtual network resource.
:param bool allow_forwarded_traffic: Whether the forwarded traffic from the VMs in the local virtual network will be allowed/disallowed in remote virtual network.
:param bool allow_gateway_transit: If gateway links can be used in remote virtual networking to link to this virtual network.
:param bool allow_virtual_network_access: Whether the VMs in the local virtual network space would be able to access the VMs in remote virtual network space.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str peering_state: The status of the virtual network peering.
:param str provisioning_state: The provisioning state of the resource.
:param 'AddressSpaceResponseArgs' remote_address_space: The reference of the remote virtual network address space.
:param 'SubResourceResponseArgs' remote_virtual_network: The reference of the remote virtual network. The remote virtual network can be in the same or different region (preview). See here to register for the preview and learn more (https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
:param bool use_remote_gateways: If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway.
"""
if allow_forwarded_traffic is not None:
pulumi.set(__self__, "allow_forwarded_traffic", allow_forwarded_traffic)
if allow_gateway_transit is not None:
pulumi.set(__self__, "allow_gateway_transit", allow_gateway_transit)
if allow_virtual_network_access is not None:
pulumi.set(__self__, "allow_virtual_network_access", allow_virtual_network_access)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if peering_state is not None:
pulumi.set(__self__, "peering_state", peering_state)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if remote_address_space is not None:
pulumi.set(__self__, "remote_address_space", remote_address_space)
if remote_virtual_network is not None:
pulumi.set(__self__, "remote_virtual_network", remote_virtual_network)
if use_remote_gateways is not None:
pulumi.set(__self__, "use_remote_gateways", use_remote_gateways)
    @property
    @pulumi.getter(name="allowForwardedTraffic")
    def allow_forwarded_traffic(self) -> Optional[bool]:
        """
        Whether the forwarded traffic from the VMs in the local virtual network will be allowed/disallowed in remote virtual network.
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "allow_forwarded_traffic")
    @property
    @pulumi.getter(name="allowGatewayTransit")
    def allow_gateway_transit(self) -> Optional[bool]:
        """
        If gateway links can be used in remote virtual networking to link to this virtual network.
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "allow_gateway_transit")
    @property
    @pulumi.getter(name="allowVirtualNetworkAccess")
    def allow_virtual_network_access(self) -> Optional[bool]:
        """
        Whether the VMs in the local virtual network space would be able to access the VMs in remote virtual network space.
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "allow_virtual_network_access")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="peeringState")
    def peering_state(self) -> Optional[str]:
        """
        The status of the virtual network peering.
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "peering_state")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        The provisioning state of the resource.
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="remoteAddressSpace")
    def remote_address_space(self) -> Optional['outputs.AddressSpaceResponse']:
        """
        The reference of the remote virtual network address space.
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "remote_address_space")
    @property
    @pulumi.getter(name="remoteVirtualNetwork")
    def remote_virtual_network(self) -> Optional['outputs.SubResourceResponse']:
        """
        The reference of the remote virtual network. The remote virtual network can be in the same or different region (preview). See here to register for the preview and learn more (https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "remote_virtual_network")
    @property
    @pulumi.getter(name="useRemoteGateways")
    def use_remote_gateways(self) -> Optional[bool]:
        """
        If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway.
        """
        # Reads back the value recorded by __init__ via pulumi.set (may be absent).
        return pulumi.get(self, "use_remote_gateways")
    def _translate_property(self, prop):
        # Translate a camelCase wire-format name to its snake_case Python
        # attribute name; unknown names fall back to the input unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VirtualNetworkTapResponse(dict):
    """
    Virtual Network Tap resource.
    """
    # NOTE(review): appears to be generated Pulumi SDK code; prefer fixing the
    # generator over hand-editing the logic here.
    def __init__(__self__, *,
                 name: str,
                 network_interface_tap_configurations: Sequence['outputs.NetworkInterfaceTapConfigurationResponse'],
                 provisioning_state: str,
                 resource_guid: str,
                 type: str,
                 destination_load_balancer_front_end_ip_configuration: Optional['outputs.FrontendIPConfigurationResponse'] = None,
                 destination_network_interface_ip_configuration: Optional['outputs.NetworkInterfaceIPConfigurationResponse'] = None,
                 destination_port: Optional[int] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 location: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        Virtual Network Tap resource.
        :param str name: Resource name.
        :param Sequence['NetworkInterfaceTapConfigurationResponseArgs'] network_interface_tap_configurations: Specifies the list of resource IDs for the network interface IP configuration that needs to be tapped.
        :param str provisioning_state: The provisioning state of the virtual network tap. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str resource_guid: The resourceGuid property of the virtual network tap.
        :param str type: Resource type.
        :param 'FrontendIPConfigurationResponseArgs' destination_load_balancer_front_end_ip_configuration: The reference to the private IP address on the internal Load Balancer that will receive the tap.
        :param 'NetworkInterfaceIPConfigurationResponseArgs' destination_network_interface_ip_configuration: The reference to the private IP Address of the collector nic that will receive the tap.
        :param int destination_port: The VXLAN destination port that will receive the tapped traffic.
        :param str etag: Gets a unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str location: Resource location.
        :param Mapping[str, str] tags: Resource tags.
        """
        # Required properties are always recorded on the instance.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "network_interface_tap_configurations", network_interface_tap_configurations)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "resource_guid", resource_guid)
        pulumi.set(__self__, "type", type)
        # Optional properties are recorded only when a value was provided.
        if destination_load_balancer_front_end_ip_configuration is not None:
            pulumi.set(__self__, "destination_load_balancer_front_end_ip_configuration", destination_load_balancer_front_end_ip_configuration)
        if destination_network_interface_ip_configuration is not None:
            pulumi.set(__self__, "destination_network_interface_ip_configuration", destination_network_interface_ip_configuration)
        if destination_port is not None:
            pulumi.set(__self__, "destination_port", destination_port)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkInterfaceTapConfigurations")
    def network_interface_tap_configurations(self) -> Sequence['outputs.NetworkInterfaceTapConfigurationResponse']:
        """
        Specifies the list of resource IDs for the network interface IP configuration that needs to be tapped.
        """
        return pulumi.get(self, "network_interface_tap_configurations")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the virtual network tap. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        The resourceGuid property of the virtual network tap.
        """
        return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="destinationLoadBalancerFrontEndIPConfiguration")
    def destination_load_balancer_front_end_ip_configuration(self) -> Optional['outputs.FrontendIPConfigurationResponse']:
        """
        The reference to the private IP address on the internal Load Balancer that will receive the tap.
        """
        return pulumi.get(self, "destination_load_balancer_front_end_ip_configuration")
    @property
    @pulumi.getter(name="destinationNetworkInterfaceIPConfiguration")
    def destination_network_interface_ip_configuration(self) -> Optional['outputs.NetworkInterfaceIPConfigurationResponse']:
        """
        The reference to the private IP Address of the collector nic that will receive the tap.
        """
        return pulumi.get(self, "destination_network_interface_ip_configuration")
    @property
    @pulumi.getter(name="destinationPort")
    def destination_port(self) -> Optional[int]:
        """
        The VXLAN destination port that will receive the tapped traffic.
        """
        return pulumi.get(self, "destination_port")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Gets a unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    def _translate_property(self, prop):
        # Translate a camelCase wire-format name to its snake_case Python
        # attribute name; unknown names fall back to the input unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VpnClientConfigurationResponse(dict):
    """
    VpnClientConfiguration for P2S client.
    """
    def __init__(__self__, *,
                 aad_audience: Optional[str] = None,
                 aad_issuer: Optional[str] = None,
                 aad_tenant: Optional[str] = None,
                 radius_server_address: Optional[str] = None,
                 radius_server_secret: Optional[str] = None,
                 vpn_client_address_pool: Optional['outputs.AddressSpaceResponse'] = None,
                 vpn_client_ipsec_policies: Optional[Sequence['outputs.IpsecPolicyResponse']] = None,
                 vpn_client_protocols: Optional[Sequence[str]] = None,
                 vpn_client_revoked_certificates: Optional[Sequence['outputs.VpnClientRevokedCertificateResponse']] = None,
                 vpn_client_root_certificates: Optional[Sequence['outputs.VpnClientRootCertificateResponse']] = None):
        """
        VpnClientConfiguration for P2S client.
        :param str aad_audience: The AADAudience property of the VirtualNetworkGateway resource for vpn client connection used for AAD authentication.
        :param str aad_issuer: The AADIssuer property of the VirtualNetworkGateway resource for vpn client connection used for AAD authentication.
        :param str aad_tenant: The AADTenant property of the VirtualNetworkGateway resource for vpn client connection used for AAD authentication.
        :param str radius_server_address: The radius server address property of the VirtualNetworkGateway resource for vpn client connection.
        :param str radius_server_secret: The radius secret property of the VirtualNetworkGateway resource for vpn client connection.
        :param 'AddressSpaceResponseArgs' vpn_client_address_pool: The reference of the address space resource which represents Address space for P2S VpnClient.
        :param Sequence['IpsecPolicyResponseArgs'] vpn_client_ipsec_policies: VpnClientIpsecPolicies for virtual network gateway P2S client.
        :param Sequence[str] vpn_client_protocols: VpnClientProtocols for Virtual network gateway.
        :param Sequence['VpnClientRevokedCertificateResponseArgs'] vpn_client_revoked_certificates: VpnClientRevokedCertificate for Virtual network gateway.
        :param Sequence['VpnClientRootCertificateResponseArgs'] vpn_client_root_certificates: VpnClientRootCertificate for virtual network gateway.
        """
        # Every field of this type is optional; only provided values are recorded.
        if aad_audience is not None:
            pulumi.set(__self__, "aad_audience", aad_audience)
        if aad_issuer is not None:
            pulumi.set(__self__, "aad_issuer", aad_issuer)
        if aad_tenant is not None:
            pulumi.set(__self__, "aad_tenant", aad_tenant)
        if radius_server_address is not None:
            pulumi.set(__self__, "radius_server_address", radius_server_address)
        if radius_server_secret is not None:
            pulumi.set(__self__, "radius_server_secret", radius_server_secret)
        if vpn_client_address_pool is not None:
            pulumi.set(__self__, "vpn_client_address_pool", vpn_client_address_pool)
        if vpn_client_ipsec_policies is not None:
            pulumi.set(__self__, "vpn_client_ipsec_policies", vpn_client_ipsec_policies)
        if vpn_client_protocols is not None:
            pulumi.set(__self__, "vpn_client_protocols", vpn_client_protocols)
        if vpn_client_revoked_certificates is not None:
            pulumi.set(__self__, "vpn_client_revoked_certificates", vpn_client_revoked_certificates)
        if vpn_client_root_certificates is not None:
            pulumi.set(__self__, "vpn_client_root_certificates", vpn_client_root_certificates)
    @property
    @pulumi.getter(name="aadAudience")
    def aad_audience(self) -> Optional[str]:
        """
        The AADAudience property of the VirtualNetworkGateway resource for vpn client connection used for AAD authentication.
        """
        return pulumi.get(self, "aad_audience")
    @property
    @pulumi.getter(name="aadIssuer")
    def aad_issuer(self) -> Optional[str]:
        """
        The AADIssuer property of the VirtualNetworkGateway resource for vpn client connection used for AAD authentication.
        """
        return pulumi.get(self, "aad_issuer")
    @property
    @pulumi.getter(name="aadTenant")
    def aad_tenant(self) -> Optional[str]:
        """
        The AADTenant property of the VirtualNetworkGateway resource for vpn client connection used for AAD authentication.
        """
        return pulumi.get(self, "aad_tenant")
    @property
    @pulumi.getter(name="radiusServerAddress")
    def radius_server_address(self) -> Optional[str]:
        """
        The radius server address property of the VirtualNetworkGateway resource for vpn client connection.
        """
        return pulumi.get(self, "radius_server_address")
    @property
    @pulumi.getter(name="radiusServerSecret")
    def radius_server_secret(self) -> Optional[str]:
        """
        The radius secret property of the VirtualNetworkGateway resource for vpn client connection.
        """
        return pulumi.get(self, "radius_server_secret")
    @property
    @pulumi.getter(name="vpnClientAddressPool")
    def vpn_client_address_pool(self) -> Optional['outputs.AddressSpaceResponse']:
        """
        The reference of the address space resource which represents Address space for P2S VpnClient.
        """
        return pulumi.get(self, "vpn_client_address_pool")
    @property
    @pulumi.getter(name="vpnClientIpsecPolicies")
    def vpn_client_ipsec_policies(self) -> Optional[Sequence['outputs.IpsecPolicyResponse']]:
        """
        VpnClientIpsecPolicies for virtual network gateway P2S client.
        """
        return pulumi.get(self, "vpn_client_ipsec_policies")
    @property
    @pulumi.getter(name="vpnClientProtocols")
    def vpn_client_protocols(self) -> Optional[Sequence[str]]:
        """
        VpnClientProtocols for Virtual network gateway.
        """
        return pulumi.get(self, "vpn_client_protocols")
    @property
    @pulumi.getter(name="vpnClientRevokedCertificates")
    def vpn_client_revoked_certificates(self) -> Optional[Sequence['outputs.VpnClientRevokedCertificateResponse']]:
        """
        VpnClientRevokedCertificate for Virtual network gateway.
        """
        return pulumi.get(self, "vpn_client_revoked_certificates")
    @property
    @pulumi.getter(name="vpnClientRootCertificates")
    def vpn_client_root_certificates(self) -> Optional[Sequence['outputs.VpnClientRootCertificateResponse']]:
        """
        VpnClientRootCertificate for virtual network gateway.
        """
        return pulumi.get(self, "vpn_client_root_certificates")
    def _translate_property(self, prop):
        # Translate a camelCase wire-format name to its snake_case Python
        # attribute name; unknown names fall back to the input unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VpnClientConnectionHealthDetailResponseResult(dict):
    """
    VPN client connection health detail.
    """
    # NOTE: unlike the *Response types, this Result type has no
    # _translate_property hook; all fields are required.
    def __init__(__self__, *,
                 egress_bytes_transferred: float,
                 egress_packets_transferred: float,
                 ingress_bytes_transferred: float,
                 ingress_packets_transferred: float,
                 max_bandwidth: float,
                 max_packets_per_second: float,
                 private_ip_address: str,
                 public_ip_address: str,
                 vpn_connection_duration: float,
                 vpn_connection_id: str,
                 vpn_connection_time: str,
                 vpn_user_name: str):
        """
        VPN client connection health detail.
        :param float egress_bytes_transferred: The egress bytes per second.
        :param float egress_packets_transferred: The egress packets per second.
        :param float ingress_bytes_transferred: The ingress bytes per second.
        :param float ingress_packets_transferred: The ingress packets per second.
        :param float max_bandwidth: The max bandwidth.
        :param float max_packets_per_second: The max packets transferred per second.
        :param str private_ip_address: The assigned private Ip of a connected vpn client.
        :param str public_ip_address: The public Ip of a connected vpn client.
        :param float vpn_connection_duration: The duration time of a connected vpn client.
        :param str vpn_connection_id: The vpn client Id.
        :param str vpn_connection_time: The start time of a connected vpn client.
        :param str vpn_user_name: The user name of a connected vpn client.
        """
        pulumi.set(__self__, "egress_bytes_transferred", egress_bytes_transferred)
        pulumi.set(__self__, "egress_packets_transferred", egress_packets_transferred)
        pulumi.set(__self__, "ingress_bytes_transferred", ingress_bytes_transferred)
        pulumi.set(__self__, "ingress_packets_transferred", ingress_packets_transferred)
        pulumi.set(__self__, "max_bandwidth", max_bandwidth)
        pulumi.set(__self__, "max_packets_per_second", max_packets_per_second)
        pulumi.set(__self__, "private_ip_address", private_ip_address)
        pulumi.set(__self__, "public_ip_address", public_ip_address)
        pulumi.set(__self__, "vpn_connection_duration", vpn_connection_duration)
        pulumi.set(__self__, "vpn_connection_id", vpn_connection_id)
        pulumi.set(__self__, "vpn_connection_time", vpn_connection_time)
        pulumi.set(__self__, "vpn_user_name", vpn_user_name)
    @property
    @pulumi.getter(name="egressBytesTransferred")
    def egress_bytes_transferred(self) -> float:
        """
        The egress bytes per second.
        """
        return pulumi.get(self, "egress_bytes_transferred")
    @property
    @pulumi.getter(name="egressPacketsTransferred")
    def egress_packets_transferred(self) -> float:
        """
        The egress packets per second.
        """
        return pulumi.get(self, "egress_packets_transferred")
    @property
    @pulumi.getter(name="ingressBytesTransferred")
    def ingress_bytes_transferred(self) -> float:
        """
        The ingress bytes per second.
        """
        return pulumi.get(self, "ingress_bytes_transferred")
    @property
    @pulumi.getter(name="ingressPacketsTransferred")
    def ingress_packets_transferred(self) -> float:
        """
        The ingress packets per second.
        """
        return pulumi.get(self, "ingress_packets_transferred")
    @property
    @pulumi.getter(name="maxBandwidth")
    def max_bandwidth(self) -> float:
        """
        The max bandwidth.
        """
        return pulumi.get(self, "max_bandwidth")
    @property
    @pulumi.getter(name="maxPacketsPerSecond")
    def max_packets_per_second(self) -> float:
        """
        The max packets transferred per second.
        """
        return pulumi.get(self, "max_packets_per_second")
    @property
    @pulumi.getter(name="privateIpAddress")
    def private_ip_address(self) -> str:
        """
        The assigned private Ip of a connected vpn client.
        """
        return pulumi.get(self, "private_ip_address")
    @property
    @pulumi.getter(name="publicIpAddress")
    def public_ip_address(self) -> str:
        """
        The public Ip of a connected vpn client.
        """
        return pulumi.get(self, "public_ip_address")
    @property
    @pulumi.getter(name="vpnConnectionDuration")
    def vpn_connection_duration(self) -> float:
        """
        The duration time of a connected vpn client.
        """
        return pulumi.get(self, "vpn_connection_duration")
    @property
    @pulumi.getter(name="vpnConnectionId")
    def vpn_connection_id(self) -> str:
        """
        The vpn client Id.
        """
        return pulumi.get(self, "vpn_connection_id")
    @property
    @pulumi.getter(name="vpnConnectionTime")
    def vpn_connection_time(self) -> str:
        """
        The start time of a connected vpn client.
        """
        return pulumi.get(self, "vpn_connection_time")
    @property
    @pulumi.getter(name="vpnUserName")
    def vpn_user_name(self) -> str:
        """
        The user name of a connected vpn client.
        """
        return pulumi.get(self, "vpn_user_name")
@pulumi.output_type
class VpnClientConnectionHealthResponse(dict):
    """
    VpnClientConnectionHealth properties.
    """
    def __init__(__self__, *,
                 total_egress_bytes_transferred: float,
                 total_ingress_bytes_transferred: float,
                 allocated_ip_addresses: Optional[Sequence[str]] = None,
                 vpn_client_connections_count: Optional[int] = None):
        """
        VpnClientConnectionHealth properties.
        :param float total_egress_bytes_transferred: Total of the Egress Bytes Transferred in this connection.
        :param float total_ingress_bytes_transferred: Total of the Ingress Bytes Transferred in this P2S Vpn connection.
        :param Sequence[str] allocated_ip_addresses: List of allocated ip addresses to the connected p2s vpn clients.
        :param int vpn_client_connections_count: The total of p2s vpn clients connected at this time to this P2SVpnGateway.
        """
        # Required properties are always recorded on the instance.
        pulumi.set(__self__, "total_egress_bytes_transferred", total_egress_bytes_transferred)
        pulumi.set(__self__, "total_ingress_bytes_transferred", total_ingress_bytes_transferred)
        # Optional properties are recorded only when a value was provided.
        if allocated_ip_addresses is not None:
            pulumi.set(__self__, "allocated_ip_addresses", allocated_ip_addresses)
        if vpn_client_connections_count is not None:
            pulumi.set(__self__, "vpn_client_connections_count", vpn_client_connections_count)
    @property
    @pulumi.getter(name="totalEgressBytesTransferred")
    def total_egress_bytes_transferred(self) -> float:
        """
        Total of the Egress Bytes Transferred in this connection.
        """
        return pulumi.get(self, "total_egress_bytes_transferred")
    @property
    @pulumi.getter(name="totalIngressBytesTransferred")
    def total_ingress_bytes_transferred(self) -> float:
        """
        Total of the Ingress Bytes Transferred in this P2S Vpn connection.
        """
        return pulumi.get(self, "total_ingress_bytes_transferred")
    @property
    @pulumi.getter(name="allocatedIpAddresses")
    def allocated_ip_addresses(self) -> Optional[Sequence[str]]:
        """
        List of allocated ip addresses to the connected p2s vpn clients.
        """
        return pulumi.get(self, "allocated_ip_addresses")
    @property
    @pulumi.getter(name="vpnClientConnectionsCount")
    def vpn_client_connections_count(self) -> Optional[int]:
        """
        The total of p2s vpn clients connected at this time to this P2SVpnGateway.
        """
        return pulumi.get(self, "vpn_client_connections_count")
    def _translate_property(self, prop):
        # Translate a camelCase wire-format name to its snake_case Python
        # attribute name; unknown names fall back to the input unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VpnClientRevokedCertificateResponse(dict):
    """
    VPN client revoked certificate of virtual network gateway.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 thumbprint: Optional[str] = None):
        """
        VPN client revoked certificate of virtual network gateway.
        :param str provisioning_state: The provisioning state of the VPN client revoked certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param str thumbprint: The revoked VPN client certificate thumbprint.
        """
        # Required properties are always recorded on the instance.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional properties are recorded only when a value was provided.
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if thumbprint is not None:
            pulumi.set(__self__, "thumbprint", thumbprint)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the VPN client revoked certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def thumbprint(self) -> Optional[str]:
        """
        The revoked VPN client certificate thumbprint.
        """
        return pulumi.get(self, "thumbprint")
    def _translate_property(self, prop):
        # Translate a camelCase wire-format name to its snake_case Python
        # attribute name; unknown names fall back to the input unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VpnClientRootCertificateResponse(dict):
    """
    VPN client root certificate of virtual network gateway.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 public_cert_data: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None):
        """
        VPN client root certificate of virtual network gateway.
        :param str provisioning_state: The provisioning state of the VPN client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str public_cert_data: The certificate public data.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        # Required properties are always recorded on the instance.
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "public_cert_data", public_cert_data)
        # Optional properties are recorded only when a value was provided.
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the VPN client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicCertData")
    def public_cert_data(self) -> str:
        """
        The certificate public data.
        """
        return pulumi.get(self, "public_cert_data")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    def _translate_property(self, prop):
        # Translate a camelCase wire-format name to its snake_case Python
        # attribute name; unknown names fall back to the input unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VpnConnectionResponse(dict):
    """
    VpnConnection Resource.
    """
    def __init__(__self__, *,
                 connection_status: str,
                 egress_bytes_transferred: float,
                 etag: str,
                 ingress_bytes_transferred: float,
                 provisioning_state: str,
                 connection_bandwidth: Optional[int] = None,
                 enable_bgp: Optional[bool] = None,
                 enable_internet_security: Optional[bool] = None,
                 enable_rate_limiting: Optional[bool] = None,
                 id: Optional[str] = None,
                 ipsec_policies: Optional[Sequence['outputs.IpsecPolicyResponse']] = None,
                 name: Optional[str] = None,
                 remote_vpn_site: Optional['outputs.SubResourceResponse'] = None,
                 routing_weight: Optional[int] = None,
                 shared_key: Optional[str] = None,
                 use_local_azure_ip_address: Optional[bool] = None,
                 use_policy_based_traffic_selectors: Optional[bool] = None,
                 vpn_connection_protocol_type: Optional[str] = None):
        """
        VpnConnection Resource.
        :param str connection_status: The connection status.
        :param float egress_bytes_transferred: Egress bytes transferred.
        :param str etag: Gets a unique read-only string that changes whenever the resource is updated.
        :param float ingress_bytes_transferred: Ingress bytes transferred.
        :param str provisioning_state: The provisioning state of the resource.
        :param int connection_bandwidth: Expected bandwidth in MBPS.
        :param bool enable_bgp: EnableBgp flag.
        :param bool enable_internet_security: Enable internet security.
        :param bool enable_rate_limiting: EnableRateLimiting flag.
        :param str id: Resource ID.
        :param Sequence['IpsecPolicyResponseArgs'] ipsec_policies: The IPSec Policies to be considered by this connection.
        :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param 'SubResourceResponseArgs' remote_vpn_site: Id of the connected vpn site.
        :param int routing_weight: Routing weight for vpn connection.
        :param str shared_key: SharedKey for the vpn connection.
        :param bool use_local_azure_ip_address: Use local azure ip to initiate connection.
        :param bool use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
        :param str vpn_connection_protocol_type: Connection protocol used for this connection.
        """
        # Required properties are always recorded on the instance.
        pulumi.set(__self__, "connection_status", connection_status)
        pulumi.set(__self__, "egress_bytes_transferred", egress_bytes_transferred)
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "ingress_bytes_transferred", ingress_bytes_transferred)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional properties are recorded only when a value was provided.
        if connection_bandwidth is not None:
            pulumi.set(__self__, "connection_bandwidth", connection_bandwidth)
        if enable_bgp is not None:
            pulumi.set(__self__, "enable_bgp", enable_bgp)
        if enable_internet_security is not None:
            pulumi.set(__self__, "enable_internet_security", enable_internet_security)
        if enable_rate_limiting is not None:
            pulumi.set(__self__, "enable_rate_limiting", enable_rate_limiting)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if ipsec_policies is not None:
            pulumi.set(__self__, "ipsec_policies", ipsec_policies)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if remote_vpn_site is not None:
            pulumi.set(__self__, "remote_vpn_site", remote_vpn_site)
        if routing_weight is not None:
            pulumi.set(__self__, "routing_weight", routing_weight)
        if shared_key is not None:
            pulumi.set(__self__, "shared_key", shared_key)
        if use_local_azure_ip_address is not None:
            pulumi.set(__self__, "use_local_azure_ip_address", use_local_azure_ip_address)
        if use_policy_based_traffic_selectors is not None:
            pulumi.set(__self__, "use_policy_based_traffic_selectors", use_policy_based_traffic_selectors)
        if vpn_connection_protocol_type is not None:
            pulumi.set(__self__, "vpn_connection_protocol_type", vpn_connection_protocol_type)
    @property
    @pulumi.getter(name="connectionStatus")
    def connection_status(self) -> str:
        """
        The connection status.
        """
        return pulumi.get(self, "connection_status")
    @property
    @pulumi.getter(name="egressBytesTransferred")
    def egress_bytes_transferred(self) -> float:
        """
        Egress bytes transferred.
        """
        return pulumi.get(self, "egress_bytes_transferred")
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        Gets a unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="ingressBytesTransferred")
    def ingress_bytes_transferred(self) -> float:
        """
        Ingress bytes transferred.
        """
        return pulumi.get(self, "ingress_bytes_transferred")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="connectionBandwidth")
    def connection_bandwidth(self) -> Optional[int]:
        """
        Expected bandwidth in MBPS.
        """
        return pulumi.get(self, "connection_bandwidth")
    @property
    @pulumi.getter(name="enableBgp")
    def enable_bgp(self) -> Optional[bool]:
        """
        EnableBgp flag.
        """
        return pulumi.get(self, "enable_bgp")
    @property
    @pulumi.getter(name="enableInternetSecurity")
    def enable_internet_security(self) -> Optional[bool]:
        """
        Enable internet security.
        """
        return pulumi.get(self, "enable_internet_security")
    @property
    @pulumi.getter(name="enableRateLimiting")
    def enable_rate_limiting(self) -> Optional[bool]:
        """
        EnableRateLimiting flag.
        """
        return pulumi.get(self, "enable_rate_limiting")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="ipsecPolicies")
    def ipsec_policies(self) -> Optional[Sequence['outputs.IpsecPolicyResponse']]:
        """
        The IPSec Policies to be considered by this connection.
        """
        return pulumi.get(self, "ipsec_policies")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="remoteVpnSite")
    def remote_vpn_site(self) -> Optional['outputs.SubResourceResponse']:
        """
        Id of the connected vpn site.
        """
        return pulumi.get(self, "remote_vpn_site")
    @property
    @pulumi.getter(name="routingWeight")
    def routing_weight(self) -> Optional[int]:
        """
        Routing weight for vpn connection.
        """
        return pulumi.get(self, "routing_weight")
    @property
    @pulumi.getter(name="sharedKey")
    def shared_key(self) -> Optional[str]:
        """
        SharedKey for the vpn connection.
        """
        return pulumi.get(self, "shared_key")
    @property
    @pulumi.getter(name="useLocalAzureIpAddress")
    def use_local_azure_ip_address(self) -> Optional[bool]:
        """
        Use local azure ip to initiate connection.
        """
        return pulumi.get(self, "use_local_azure_ip_address")
    @property
    @pulumi.getter(name="usePolicyBasedTrafficSelectors")
    def use_policy_based_traffic_selectors(self) -> Optional[bool]:
        """
        Enable policy-based traffic selectors.
        """
        return pulumi.get(self, "use_policy_based_traffic_selectors")
    @property
    @pulumi.getter(name="vpnConnectionProtocolType")
    def vpn_connection_protocol_type(self) -> Optional[str]:
        """
        Connection protocol used for this connection.
        """
        return pulumi.get(self, "vpn_connection_protocol_type")
    def _translate_property(self, prop):
        # Translate a camelCase wire-format name to its snake_case Python
        # attribute name; unknown names fall back to the input unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WebApplicationFirewallCustomRuleResponse(dict):
    """Defines contents of a web application rule."""

    def __init__(__self__, *,
                 action: str,
                 etag: str,
                 match_conditions: Sequence['outputs.MatchConditionResponse'],
                 priority: int,
                 rule_type: str,
                 name: Optional[str] = None):
        """Defines contents of a web application rule.

        :param str action: Type of Actions.
        :param str etag: Gets a unique read-only string that changes whenever the resource is updated.
        :param Sequence['MatchConditionResponseArgs'] match_conditions: List of match conditions.
        :param int priority: Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value.
        :param str rule_type: Describes type of rule.
        :param str name: Gets name of the resource that is unique within a policy. This name can be used to access the resource.
        """
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "etag", etag)
        pulumi.set(__self__, "match_conditions", match_conditions)
        pulumi.set(__self__, "priority", priority)
        pulumi.set(__self__, "rule_type", rule_type)
        # `name` is the only optional field; only store it when supplied.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def action(self) -> str:
        """Type of Actions."""
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """Gets a unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="matchConditions")
    def match_conditions(self) -> Sequence['outputs.MatchConditionResponse']:
        """List of match conditions."""
        return pulumi.get(self, "match_conditions")

    @property
    @pulumi.getter
    def priority(self) -> int:
        """Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value."""
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> str:
        """Describes type of rule."""
        return pulumi.get(self, "rule_type")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Gets name of the resource that is unique within a policy. This name can be used to access the resource."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute, defaulting to itself.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
| 39.978256
| 395
| 0.651487
|
acfdad40e3f9dacf1de63ecfbdeea32fb76faca3
| 4,318
|
py
|
Python
|
RUN_FILES/prepare_data/48AE/prepare_train_data_48AE.py
|
yifding/W2NER
|
d13128e45f3930a8b8faa794318939dc90a75974
|
[
"MIT"
] | null | null | null |
RUN_FILES/prepare_data/48AE/prepare_train_data_48AE.py
|
yifding/W2NER
|
d13128e45f3930a8b8faa794318939dc90a75974
|
[
"MIT"
] | null | null | null |
RUN_FILES/prepare_data/48AE/prepare_train_data_48AE.py
|
yifding/W2NER
|
d13128e45f3930a8b8faa794318939dc90a75974
|
[
"MIT"
] | null | null | null |
import os
import random
import argparse
import jsonlines
from functools import partial
from tqdm.contrib.concurrent import process_map
from tqdm import tqdm
# Fix the global RNG seed so instance shuffling is reproducible across runs.
random.seed(19940802)
def process_instance(instance, max_span_length=5, max_seq_length=300):
    """Convert one raw attribute-value instance into a W2NER-style record.

    Enumerates every token span of length 1..``max_span_length`` in the
    lower-cased, truncated token sequence and keeps the spans whose joined
    text exactly matches one of the instance's gold attribute values.

    :param instance: dict with ``X_text`` (token list) and ``Y_values``
        (dict mapping one attribute name -> list of lower-cased value
        strings); ``asin`` and ``product_type`` are optional pass-throughs.
    :param max_span_length: maximum span length in tokens.
    :param max_seq_length: tokens beyond this count are discarded.
    :return: dict with ``asin``, ``product_type``, ``ner`` (matched spans
        sorted by start index, then length) and ``sentence`` (tokens used).
    """
    asin = instance.get('asin', '')
    product_type = instance.get('product_type', '')
    tokens = instance["X_text"]
    tokens_lower = [token.lower() for token in tokens]
    tokens_lower = tokens_lower[:max_seq_length]

    # Assumes exactly one attribute per instance — TODO confirm upstream format.
    att = list(instance['Y_values'].keys())[0]
    attribute_values = instance['Y_values'][att]

    gt_spans = []
    # BUG FIX: the previous loop used `range(len(tokens_lower) - 1)`, which
    # silently skipped the single-token span at the final position.
    for i in range(len(tokens_lower)):
        for j in range(i + 1, min(len(tokens_lower) + 1, i + 1 + max_span_length)):
            pred_text = ' '.join(tokens_lower[i:j])
            if pred_text not in attribute_values:
                continue
            gt_spans.append({
                "index": list(range(i, j)),
                "type": att,
            })
    gt_spans = sorted(gt_spans, key=lambda x: (x["index"][0], len(x["index"])))
    return {
        "asin": asin,
        "product_type": product_type,
        "ner": gt_spans,
        "sentence": tokens_lower,
    }
def prepare_train_data(
    att_list,
    input_dir,
    output_file,
    max_span_length,
    max_seq_length,
    mode,
):
    """Read `<att>.<mode>` jsonlines files, shuffle, convert, and write out.

    Loads every instance for each attribute in `att_list` from `input_dir`,
    shuffles them, converts each with `process_instance`, and writes the
    results to `output_file` as jsonlines.
    """
    instances = []
    for att in att_list:
        source = os.path.join(input_dir, f"{att}.{mode}")
        with jsonlines.open(source, 'r') as reader:
            instances.extend(reader)

    random.shuffle(instances)

    converted = [
        process_instance(inst, max_span_length=max_span_length, max_seq_length=max_seq_length)
        for inst in tqdm(instances)
    ]

    with jsonlines.open(output_file, 'w') as writer:
        writer.write_all(converted)
def parse_args():
    """Parse command-line arguments and prepare the output directory.

    :return: argparse.Namespace with ``input_dir``, ``output_file``,
        ``output_dir``, ``max_span_length``, ``max_seq_length``,
        ``att_list`` and ``mode``.
    :raises NotADirectoryError: if ``--input_dir`` is not an existing directory.
    """
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument(
        "--input_dir",
        required=True,
        type=str,
    )
    parser.add_argument(
        "--output_file",
        required=True,
        type=str,
    )
    parser.add_argument(
        "--output_dir",
        required=True,
        type=str,
    )
    parser.add_argument(
        "--max_span_length",
        default=5,
        help="maximum length (number of words) of span",
        type=int,
    )
    parser.add_argument(
        "--max_seq_length",
        default=300,
        help="maximum length (number of words) of sequence",
        type=int,
    )
    parser.add_argument(
        "--att_list",
        required=True,
        # SECURITY: `type=eval` executes arbitrary Python taken from the
        # command line (expects a Python list literal of attribute names).
        # Acceptable only because the CLI is operator-controlled; prefer
        # ast.literal_eval if this ever receives untrusted input.
        type=eval,
    )
    parser.add_argument(
        "--mode",
        required=True,
        type=str,
        choices=['train', 'dev', 'gold']
    )
    args = parser.parse_args()
    # Validate explicitly instead of `assert`: asserts vanish under `python -O`.
    if not os.path.isdir(args.input_dir):
        raise NotADirectoryError(f"--input_dir is not a directory: {args.input_dir}")
    os.makedirs(args.output_dir, exist_ok=True)
    return args
def main():
    """CLI entry point: parse arguments and build the training-data file."""
    args = parse_args()
    prepare_train_data(
        att_list=args.att_list,
        input_dir=args.input_dir,
        output_file=os.path.join(args.output_dir, args.output_file),
        max_span_length=args.max_span_length,
        max_seq_length=args.max_seq_length,
        mode=args.mode,
    )
# Run the CLI entry point only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 29.986111
| 505
| 0.635248
|
acfdadfb81f369c78c783f1b32400870685a6acf
| 1,030
|
py
|
Python
|
coremltools/test/sklearn_tests/test_feature_names.py
|
LaudateCorpus1/coremltools
|
777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc
|
[
"BSD-3-Clause"
] | 2,740
|
2017-10-03T23:19:01.000Z
|
2022-03-30T15:16:39.000Z
|
coremltools/test/sklearn_tests/test_feature_names.py
|
LaudateCorpus1/coremltools
|
777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc
|
[
"BSD-3-Clause"
] | 1,057
|
2017-10-05T22:47:01.000Z
|
2022-03-31T23:51:15.000Z
|
coremltools/test/sklearn_tests/test_feature_names.py
|
LaudateCorpus1/coremltools
|
777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc
|
[
"BSD-3-Clause"
] | 510
|
2017-10-04T19:22:28.000Z
|
2022-03-31T12:16:52.000Z
|
# Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import coremltools.models._feature_management as fm
import coremltools.models.datatypes as dt
import unittest
from coremltools._deps import _HAS_SKLEARN
@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
class FeatureManagementTests(unittest.TestCase):
    """Unit tests for coremltools feature-name processing helpers."""

    def test_all_strings(self):
        """A bare list of names defaults every feature to Double."""
        names = ["a", "b", "c"]
        expected = [(n, dt.Double()) for n in names]
        result = fm.process_or_validate_features(names)
        self.assertEqual(result, expected)
        self.assertTrue(fm.is_valid_feature_list(result))

    def test_single_array(self):
        """A single name plus num_dimensions yields one Array feature."""
        expected = [("a", dt.Array(10))]
        self.assertEqual(
            fm.process_or_validate_features("a", num_dimensions=10),
            expected,
        )
| 34.333333
| 82
| 0.666019
|
acfdae07c02130338301a5869747a9b6590bdae4
| 1,221
|
py
|
Python
|
Project_Health/data/data_ready.py
|
Anonymous633671/STABILIZER
|
5a1ab8099a2d75ace7e053afc78055f1f4d359c0
|
[
"MIT"
] | null | null | null |
Project_Health/data/data_ready.py
|
Anonymous633671/STABILIZER
|
5a1ab8099a2d75ace7e053afc78055f1f4d359c0
|
[
"MIT"
] | null | null | null |
Project_Health/data/data_ready.py
|
Anonymous633671/STABILIZER
|
5a1ab8099a2d75ace7e053afc78055f1f4d359c0
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from scipy.io.arff import loadarff
import os
# Maps the integer `goal` code to the metric column that becomes the label
# (moved to the last position of the returned frame).
_GOAL_COLUMNS = {
    0: 'monthly_commits',
    1: 'monthly_contributors',
    2: 'monthly_stargazer',
    3: 'monthly_open_PRs',
    4: 'monthly_closed_PRs',
    5: 'monthly_open_issues',
    6: 'monthly_closed_issues',
}


def data_github_monthly(repo_name, directory, goal):
    """Load a monthly GitHub-metrics CSV and move the goal column last.

    Reads ``directory + repo_name`` with pandas, drops the ``dates`` column,
    and reorders the remaining columns so the metric selected by ``goal``
    (see ``_GOAL_COLUMNS``) is the final column — the usual "features first,
    target last" layout.

    :param repo_name: CSV file name (e.g. ``"abp_monthly.csv"``).
    :param directory: path prefix concatenated directly with ``repo_name``
        (must therefore end with a path separator).
    :param goal: integer 0-6 selecting the target metric.
    :return: reordered ``pandas.DataFrame``.
    :raises ValueError: if ``goal`` is not a known code (previously this
        surfaced as an opaque ``cols.index('')`` ValueError).
    """
    if goal not in _GOAL_COLUMNS:
        raise ValueError(f"goal must be an int in 0..6, got {goal!r}")
    last_col = _GOAL_COLUMNS[goal]

    df = pd.read_csv(directory + repo_name, sep=',')
    df = df.drop(columns=['dates'])

    # Keep original column order, with the goal column moved to the end.
    feature_cols = [c for c in df.columns if c != last_col]
    return df[feature_cols + [last_col]]
if __name__ == '__main__':
    # Smoke-run: print the latest monthly_closed_PRs value for every repo CSV.
    path = r'../data/data_use/'
    repo_pool = [os.path.join(filename) for filename in os.listdir(path)]
    for repo in repo_pool:
        latest = data_github_monthly(repo, path, 4).iloc[-1, -1]
        print(latest)
| 27.75
| 84
| 0.642916
|
acfdae38abb3d88fb975b97c470029b36e72d226
| 70
|
py
|
Python
|
py_scripts/times2.py
|
mtchem/gdi-beginner-py
|
7a876b8e07828ed552c61d97b756c1d0f37741ee
|
[
"MIT"
] | null | null | null |
py_scripts/times2.py
|
mtchem/gdi-beginner-py
|
7a876b8e07828ed552c61d97b756c1d0f37741ee
|
[
"MIT"
] | null | null | null |
py_scripts/times2.py
|
mtchem/gdi-beginner-py
|
7a876b8e07828ed552c61d97b756c1d0f37741ee
|
[
"MIT"
] | null | null | null |
# Tiny arithmetic demo: integer addition, true division, float truncation.
print(2 + 4)                   # 6
print(7 + 2)                   # 9
print(int(15) / float(2.7))    # true division -> 5.555...
print(int(7.3 + 8.9))          # int() truncates 16.2 -> 16
| 14
| 25
| 0.614286
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.