# resoto_plugin_aws/resource/service_quotas.py
import logging
import re
from typing import ClassVar, Dict, Optional, Type, Any, List, Pattern, Union
from attr import field, evolve
from attrs import define
from resoto_plugin_aws.resource.base import AwsResource, GraphBuilder, AwsApiSpec, AwsRegion
from resotolib.baseresources import BaseQuota, EdgeType, ModelReference
from resotolib.json_bender import Bender, S, Bend
from resotolib.types import Json
from resoto_plugin_aws.aws_client import AwsClient
log = logging.getLogger("resoto.plugins.aws")
service_name = "service-quotas"
@define(eq=False, slots=False)
class AwsQuotaMetricInfo:
    """CloudWatch usage-metric information attached to a quota (the UsageMetric part of the API response)."""

    kind: ClassVar[str] = "aws_quota_metric_info"
    # Benders mapping the PascalCase AWS API JSON keys onto this class' attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "metric_namespace": S("MetricNamespace"),
        "metric_name": S("MetricName"),
        "metric_dimensions": S("MetricDimensions"),
        "metric_statistic_recommendation": S("MetricStatisticRecommendation"),
    }
    metric_namespace: Optional[str] = field(default=None)
    metric_name: Optional[str] = field(default=None)
    metric_dimensions: Optional[Dict[str, str]] = field(default=None)
    metric_statistic_recommendation: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsQuotaPeriod:
    """Refresh period of a quota (the Period part of the API response)."""

    kind: ClassVar[str] = "aws_quota_period"
    mapping: ClassVar[Dict[str, Bender]] = {"period_value": S("PeriodValue"), "period_unit": S("PeriodUnit")}
    period_value: Optional[int] = field(default=None)
    period_unit: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsQuotaErrorReason:
    """Error reported by the service-quotas API for a quota (the ErrorReason part of the API response)."""

    kind: ClassVar[str] = "aws_quota_error_reason"
    mapping: ClassVar[Dict[str, Bender]] = {"error_code": S("ErrorCode"), "error_message": S("ErrorMessage")}
    error_code: Optional[str] = field(default=None)
    error_message: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsServiceQuota(AwsResource, BaseQuota):
    """A single AWS Service Quotas entry (service-quotas list-service-quotas).

    Quotas are collected per service code and filtered through the static
    QuotaMatcher definitions in this module; matched quotas are connected to
    the resources they constrain (instance types, volume types, ELBs, ...).
    """

    kind: ClassVar[str] = "aws_service_quota"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [
                "aws_ec2_instance_type",
                "aws_ec2_volume_type",
                "aws_vpc",
                "aws_elb",
                "aws_alb",
                "aws_iam_server_certificate",
            ]
        }
    }
    # Benders mapping the PascalCase AWS API JSON keys onto this resource's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("QuotaCode"),
        "name": S("QuotaName"),
        "service_code": S("ServiceCode"),
        "service_name": S("ServiceName"),
        "arn": S("QuotaArn"),
        "quota": S("Value"),
        "quota_unit": S("Unit"),
        "quota_adjustable": S("Adjustable"),
        "quota_global": S("GlobalQuota"),
        "quota_usage_metric": S("UsageMetric") >> Bend(AwsQuotaMetricInfo.mapping),
        "quota_period": S("Period") >> Bend(AwsQuotaPeriod.mapping),
        "quota_error_reason": S("ErrorReason") >> Bend(AwsQuotaErrorReason.mapping),
    }
    quota_unit: Optional[str] = field(default=None)
    quota_adjustable: Optional[bool] = field(default=None)
    quota_global: Optional[bool] = field(default=None)
    quota_usage_metric: Optional[AwsQuotaMetricInfo] = field(default=None)
    quota_period: Optional[AwsQuotaPeriod] = field(default=None)
    quota_error_reason: Optional[AwsQuotaErrorReason] = field(default=None)

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        """APIs (and the IAM permission) required to collect this resource."""
        return [
            AwsApiSpec(service_name, "list-service-quotas", override_iam_permission="servicequotas:ListServiceQuotas")
        ]

    @classmethod
    def collect_service(cls, service_code: str, matchers: List["QuotaMatcher"], builder: GraphBuilder) -> None:
        """Collect all quotas of one service and add those selected by any matcher."""
        log.debug(f"Collecting Service quotas for {service_code} in region {builder.region.name}")
        for js in builder.client.list(service_name, "list-service-quotas", "Quotas", ServiceCode=service_code):
            if quota := AwsServiceQuota.from_api(js, builder):
                for matcher in matchers:
                    if matcher.match(quota):
                        # Remember the matcher (bound to this region) so connect_in_graph can create edges later.
                        builder.add_node(quota, dict(source=js, matcher=evolve(matcher, region=builder.region)))

    @classmethod
    def collect_resources(cls: Type[AwsResource], builder: GraphBuilder) -> None:
        # This collect run will be called for the global region as well as any configured region.
        # We select the quotas to select based on the given region.
        quotas = GlobalQuotas if builder.region.name == "global" else RegionalQuotas
        for service, ms in quotas.items():
            AwsServiceQuota.collect_service(service, ms, builder)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect this quota to every graph node selected by the matcher stored at collect time."""
        super().connect_in_graph(builder, source)
        matcher: Optional[QuotaMatcher] = source.get("matcher", None)

        def prop_matches(attr: Any, expect: Any) -> bool:
            # fix: regex matchers raised TypeError when a node lacks the attribute
            # (getattr(..., None) returns None); treat a missing attribute as "no match".
            if isinstance(expect, Pattern):
                return attr is not None and expect.match(attr) is not None
            else:
                return bool(attr == expect)

        if matcher:
            for node in builder.graph.nodes:
                if (
                    node.kind == matcher.node_kind
                    and (matcher.region is None or node.region().id == matcher.region.id)
                    and all(prop_matches(getattr(node, k, None), v) for k, v in matcher.node_selector.items())
                ):
                    builder.add_edge(self, EdgeType.default, node=node)

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        """Create or overwrite the tag `key` on this quota via the service-quotas tagging API."""
        client.call(
            aws_service=service_name,
            action="tag-resource",
            result_name=None,
            ResourceARN=self.arn,
            Tags=[{"Key": key, "Value": value}],
        )
        return True

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        """Remove the tag `key` from this quota via the service-quotas tagging API."""
        client.call(
            aws_service=service_name,
            action="untag-resource",
            result_name=None,
            ResourceARN=self.arn,
            TagKeys=[key],
        )
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """APIs (and IAM permissions) required to mutate tags on this resource."""
        return [
            AwsApiSpec(service_name, "tag-resource", override_iam_permission="servicequotas:TagResource"),
            AwsApiSpec(service_name, "untag-resource", override_iam_permission="servicequotas:UntagResource"),
        ]

    @classmethod
    def service_name(cls) -> str:
        """AWS service name used by this resource kind."""
        return service_name
@define
class QuotaMatcher:
    """Declarative rule that selects service quotas by name and ties them to a node kind."""

    # Exact quota name, a compiled pattern, or None (None never matches anything).
    quota_name: Union[str, Pattern[str], None]
    node_kind: str
    node_selector: Dict[str, Any] = field(factory=dict)
    region: Optional[AwsRegion] = None

    def match(self, quota: AwsServiceQuota) -> bool:
        """Return True if the quota's safe_name satisfies this matcher."""
        name = self.quota_name
        if name is None:
            return False
        if isinstance(name, Pattern):
            return name.match(quota.safe_name) is not None
        return name == quota.safe_name
# Quotas that exist per region, keyed by AWS service code.
RegionalQuotas = {
    "ec2": [
        # Example: "Running On-Demand F instances" --> match InstanceTypes that start with F
        QuotaMatcher(
            quota_name=f"Running On-Demand {name} instances",
            node_kind="aws_ec2_instance_type",
            node_selector=dict(instance_type=re.compile("^" + start + "\\d")),
        )
        for name, start in {
            "Standard (A, C, D, H, I, M, R, T, Z)": "[acdhimrtz]",  # matches e.g. m4.large, i3en.3xlarge
            "F": "f",
            "G and VT": "g",
            "P": "p",
            "Inf": "inf",
            "X": "x",
            # fix: was "High Memory instances", which rendered the quota name as
            # "Running On-Demand High Memory instances instances" (doubled word) and
            # could never match the real quota "Running On-Demand High Memory instances"
            "High Memory": "u",
            "DL": "dl",
        }.items()
    ],
    "ebs": [
        QuotaMatcher(
            quota_name=re.compile(name_pattern),
            node_kind="aws_ec2_volume_type",
            node_selector=dict(volume_type=volume_type),
        )
        for name_pattern, volume_type in {
            "^Storage for.*gp2": "gp2",
            "^Storage for.*gp3": "gp3",
            "^Storage for.*standard": "standard",
            "^Storage for.*io1": "io1",
            "^Storage for.*io2": "io2",
            "^Storage for.*sc1": "sc1",
            "^Storage for.*st1": "st1",
        }.items()
    ],
    "vpc": [QuotaMatcher(quota_name="Internet gateways per Region", node_kind="aws_vpc")],
    "elasticloadbalancing": [
        QuotaMatcher(quota_name="Application Load Balancers per Region", node_kind="aws_alb"),
        QuotaMatcher(quota_name="Classic Load Balancers per Region", node_kind="aws_elb"),
    ],
}
# Quotas that exist only once per account, keyed by AWS service code.
# Collected only when the "global" pseudo region is processed (see AwsServiceQuota.collect_resources).
GlobalQuotas = {
    "iam": [
        QuotaMatcher(quota_name="Server certificates per account", node_kind="aws_iam_server_certificate"),
    ],
}
# All resource kinds exported by this module (extraction residue removed from this line).
resources: List[Type[AwsResource]] = [AwsServiceQuota]
from typing import ClassVar, Dict, Optional, List, Type
from attrs import define, field
from resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec, GraphBuilder
from resoto_plugin_aws.resource.ec2 import AwsEc2Instance
from resoto_plugin_aws.utils import ToDict
from resotolib.baseresources import BaseAutoScalingGroup, ModelReference
from resotolib.graph import Graph
from resotolib.json_bender import Bender, S, Bend, ForallBend
from resotolib.types import Json
from resoto_plugin_aws.aws_client import AwsClient
service_name = "autoscaling"
@define(eq=False, slots=False)
class AwsAutoScalingLaunchTemplateSpecification:
    """Reference to an EC2 launch template: id, name and version."""

    kind: ClassVar[str] = "aws_autoscaling_launch_template_specification"
    mapping: ClassVar[Dict[str, Bender]] = {
        "launch_template_id": S("LaunchTemplateId"),
        "launch_template_name": S("LaunchTemplateName"),
        "version": S("Version"),
    }
    launch_template_id: Optional[str] = field(default=None)
    launch_template_name: Optional[str] = field(default=None)
    version: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsAutoScalingMinMax:
    """A simple Min/Max integer range as used throughout the instance-requirements structures."""

    kind: ClassVar[str] = "aws_autoscaling_min_max"
    mapping: ClassVar[Dict[str, Bender]] = {"min": S("Min"), "max": S("Max")}
    min: Optional[int] = field(default=None)
    max: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class AwsAutoScalingInstanceRequirements:
    """Attribute-based instance type selection (InstanceRequirements) used in launch template overrides."""

    kind: ClassVar[str] = "aws_autoscaling_instance_requirements"
    # Benders mapping the PascalCase AWS API JSON keys onto this class' attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "v_cpu_count": S("VCpuCount") >> Bend(AwsAutoScalingMinMax.mapping),
        "memory_mi_b": S("MemoryMiB") >> Bend(AwsAutoScalingMinMax.mapping),
        "cpu_manufacturers": S("CpuManufacturers", default=[]),
        "memory_gi_b_per_v_cpu": S("MemoryGiBPerVCpu") >> Bend(AwsAutoScalingMinMax.mapping),
        "excluded_instance_types": S("ExcludedInstanceTypes", default=[]),
        "instance_generations": S("InstanceGenerations", default=[]),
        "spot_max_price_percentage_over_lowest_price": S("SpotMaxPricePercentageOverLowestPrice"),
        "on_demand_max_price_percentage_over_lowest_price": S("OnDemandMaxPricePercentageOverLowestPrice"),
        "bare_metal": S("BareMetal"),
        "burstable_performance": S("BurstablePerformance"),
        "require_hibernate_support": S("RequireHibernateSupport"),
        "network_interface_count": S("NetworkInterfaceCount") >> Bend(AwsAutoScalingMinMax.mapping),
        "local_storage": S("LocalStorage"),
        "local_storage_types": S("LocalStorageTypes", default=[]),
        "total_local_storage_gb": S("TotalLocalStorageGB") >> Bend(AwsAutoScalingMinMax.mapping),
        "baseline_ebs_bandwidth_mbps": S("BaselineEbsBandwidthMbps") >> Bend(AwsAutoScalingMinMax.mapping),
        "accelerator_types": S("AcceleratorTypes", default=[]),
        "accelerator_count": S("AcceleratorCount") >> Bend(AwsAutoScalingMinMax.mapping),
        "accelerator_manufacturers": S("AcceleratorManufacturers", default=[]),
        "accelerator_names": S("AcceleratorNames", default=[]),
        "accelerator_total_memory_mi_b": S("AcceleratorTotalMemoryMiB") >> Bend(AwsAutoScalingMinMax.mapping),
    }
    v_cpu_count: Optional[AwsAutoScalingMinMax] = field(default=None)
    memory_mi_b: Optional[AwsAutoScalingMinMax] = field(default=None)
    cpu_manufacturers: List[str] = field(factory=list)
    memory_gi_b_per_v_cpu: Optional[AwsAutoScalingMinMax] = field(default=None)
    excluded_instance_types: List[str] = field(factory=list)
    instance_generations: List[str] = field(factory=list)
    spot_max_price_percentage_over_lowest_price: Optional[int] = field(default=None)
    on_demand_max_price_percentage_over_lowest_price: Optional[int] = field(default=None)
    bare_metal: Optional[str] = field(default=None)
    burstable_performance: Optional[str] = field(default=None)
    require_hibernate_support: Optional[bool] = field(default=None)
    network_interface_count: Optional[AwsAutoScalingMinMax] = field(default=None)
    local_storage: Optional[str] = field(default=None)
    local_storage_types: List[str] = field(factory=list)
    total_local_storage_gb: Optional[AwsAutoScalingMinMax] = field(default=None)
    baseline_ebs_bandwidth_mbps: Optional[AwsAutoScalingMinMax] = field(default=None)
    accelerator_types: List[str] = field(factory=list)
    accelerator_count: Optional[AwsAutoScalingMinMax] = field(default=None)
    accelerator_manufacturers: List[str] = field(factory=list)
    accelerator_names: List[str] = field(factory=list)
    accelerator_total_memory_mi_b: Optional[AwsAutoScalingMinMax] = field(default=None)
@define(eq=False, slots=False)
class AwsAutoScalingLaunchTemplateOverrides:
    """Per-instance-type override in a mixed-instances launch template configuration."""

    kind: ClassVar[str] = "aws_autoscaling_launch_template_overrides"
    mapping: ClassVar[Dict[str, Bender]] = {
        "instance_type": S("InstanceType"),
        "weighted_capacity": S("WeightedCapacity"),
        "launch_template_specification": S("LaunchTemplateSpecification")
        >> Bend(AwsAutoScalingLaunchTemplateSpecification.mapping),
        "instance_requirements": S("InstanceRequirements") >> Bend(AwsAutoScalingInstanceRequirements.mapping),
    }
    instance_type: Optional[str] = field(default=None)
    weighted_capacity: Optional[str] = field(default=None)
    launch_template_specification: Optional[AwsAutoScalingLaunchTemplateSpecification] = field(default=None)
    instance_requirements: Optional[AwsAutoScalingInstanceRequirements] = field(default=None)
@define(eq=False, slots=False)
class AwsAutoScalingLaunchTemplate:
    """Launch template plus its per-type overrides, as used by a mixed instances policy."""

    kind: ClassVar[str] = "aws_autoscaling_launch_template"
    mapping: ClassVar[Dict[str, Bender]] = {
        "launch_template_specification": S("LaunchTemplateSpecification")
        >> Bend(AwsAutoScalingLaunchTemplateSpecification.mapping),
        "overrides": S("Overrides", default=[]) >> ForallBend(AwsAutoScalingLaunchTemplateOverrides.mapping),
    }
    launch_template_specification: Optional[AwsAutoScalingLaunchTemplateSpecification] = field(default=None)
    overrides: List[AwsAutoScalingLaunchTemplateOverrides] = field(factory=list)
@define(eq=False, slots=False)
class AwsAutoScalingInstancesDistribution:
    """On-demand/spot distribution settings of a mixed instances policy."""

    kind: ClassVar[str] = "aws_autoscaling_instances_distribution"
    mapping: ClassVar[Dict[str, Bender]] = {
        "on_demand_allocation_strategy": S("OnDemandAllocationStrategy"),
        "on_demand_base_capacity": S("OnDemandBaseCapacity"),
        "on_demand_percentage_above_base_capacity": S("OnDemandPercentageAboveBaseCapacity"),
        "spot_allocation_strategy": S("SpotAllocationStrategy"),
        "spot_instance_pools": S("SpotInstancePools"),
        "spot_max_price": S("SpotMaxPrice"),
    }
    on_demand_allocation_strategy: Optional[str] = field(default=None)
    on_demand_base_capacity: Optional[int] = field(default=None)
    on_demand_percentage_above_base_capacity: Optional[int] = field(default=None)
    spot_allocation_strategy: Optional[str] = field(default=None)
    spot_instance_pools: Optional[int] = field(default=None)
    spot_max_price: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsAutoScalingMixedInstancesPolicy:
    """Mixed instances policy: launch template configuration plus instance distribution."""

    kind: ClassVar[str] = "aws_autoscaling_mixed_instances_policy"
    mapping: ClassVar[Dict[str, Bender]] = {
        "launch_template": S("LaunchTemplate") >> Bend(AwsAutoScalingLaunchTemplate.mapping),
        "instances_distribution": S("InstancesDistribution") >> Bend(AwsAutoScalingInstancesDistribution.mapping),
    }
    launch_template: Optional[AwsAutoScalingLaunchTemplate] = field(default=None)
    instances_distribution: Optional[AwsAutoScalingInstancesDistribution] = field(default=None)
@define(eq=False, slots=False)
class AwsAutoScalingInstance:
    """One EC2 instance as reported inside an auto scaling group description."""

    kind: ClassVar[str] = "aws_autoscaling_instance"
    mapping: ClassVar[Dict[str, Bender]] = {
        "instance_id": S("InstanceId"),
        "instance_type": S("InstanceType"),
        "availability_zone": S("AvailabilityZone"),
        "lifecycle_state": S("LifecycleState"),
        "health_status": S("HealthStatus"),
        "launch_configuration_name": S("LaunchConfigurationName"),
        "launch_template": S("LaunchTemplate") >> Bend(AwsAutoScalingLaunchTemplateSpecification.mapping),
        "protected_from_scale_in": S("ProtectedFromScaleIn"),
        "weighted_capacity": S("WeightedCapacity"),
    }
    instance_id: Optional[str] = field(default=None)
    instance_type: Optional[str] = field(default=None)
    availability_zone: Optional[str] = field(default=None)
    lifecycle_state: Optional[str] = field(default=None)
    health_status: Optional[str] = field(default=None)
    launch_configuration_name: Optional[str] = field(default=None)
    launch_template: Optional[AwsAutoScalingLaunchTemplateSpecification] = field(default=None)
    protected_from_scale_in: Optional[bool] = field(default=None)
    weighted_capacity: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsAutoScalingSuspendedProcess:
    """A suspended scaling process of the group and why it was suspended."""

    kind: ClassVar[str] = "aws_autoscaling_suspended_process"
    mapping: ClassVar[Dict[str, Bender]] = {
        "process_name": S("ProcessName"),
        "suspension_reason": S("SuspensionReason"),
    }
    process_name: Optional[str] = field(default=None)
    suspension_reason: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsAutoScalingEnabledMetric:
    """A group metric enabled for collection, together with its granularity."""

    kind: ClassVar[str] = "aws_autoscaling_enabled_metric"
    mapping: ClassVar[Dict[str, Bender]] = {"metric": S("Metric"), "granularity": S("Granularity")}
    metric: Optional[str] = field(default=None)
    granularity: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsAutoScalingWarmPoolConfiguration:
    """Warm pool settings of an auto scaling group."""

    kind: ClassVar[str] = "aws_autoscaling_warm_pool_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "max_group_prepared_capacity": S("MaxGroupPreparedCapacity"),
        "min_size": S("MinSize"),
        "pool_state": S("PoolState"),
        "status": S("Status"),
        # flattens the nested InstanceReusePolicy.ReuseOnScaleIn flag into a plain bool
        "instance_reuse_policy": S("InstanceReusePolicy", "ReuseOnScaleIn"),
    }
    max_group_prepared_capacity: Optional[int] = field(default=None)
    min_size: Optional[int] = field(default=None)
    pool_state: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    instance_reuse_policy: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class AwsAutoScalingGroup(AwsResource, BaseAutoScalingGroup):
    """An EC2 auto scaling group (autoscaling describe-auto-scaling-groups)."""

    kind: ClassVar[str] = "aws_autoscaling_group"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "describe-auto-scaling-groups", "AutoScalingGroups")
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {"default": ["aws_ec2_instance"]},
        "predecessors": {"delete": ["aws_ec2_instance"]},
    }
    # Benders mapping the PascalCase AWS API JSON keys onto this resource's attributes.
    # min_size/max_size are declared on BaseAutoScalingGroup, hence no fields for them below.
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("AutoScalingGroupName"),
        "tags": S("Tags", default=[]) >> ToDict(),
        "name": S("AutoScalingGroupName"),
        "ctime": S("CreatedTime"),
        "arn": S("AutoScalingGroupARN"),
        "autoscaling_launch_configuration_name": S("LaunchConfigurationName"),
        "autoscaling_launch_template": S("LaunchTemplate") >> Bend(AwsAutoScalingLaunchTemplateSpecification.mapping),
        "autoscaling_mixed_instances_policy": S("MixedInstancesPolicy")
        >> Bend(AwsAutoScalingMixedInstancesPolicy.mapping),
        "min_size": S("MinSize"),
        "max_size": S("MaxSize"),
        "autoscaling_desired_capacity": S("DesiredCapacity"),
        "autoscaling_predicted_capacity": S("PredictedCapacity"),
        "autoscaling_default_cooldown": S("DefaultCooldown"),
        "autoscaling_availability_zones": S("AvailabilityZones", default=[]),
        "autoscaling_load_balancer_names": S("LoadBalancerNames", default=[]),
        "autoscaling_target_group_ar_ns": S("TargetGroupARNs", default=[]),
        "autoscaling_health_check_type": S("HealthCheckType"),
        "autoscaling_health_check_grace_period": S("HealthCheckGracePeriod"),
        "autoscaling_instances": S("Instances", default=[]) >> ForallBend(AwsAutoScalingInstance.mapping),
        "autoscaling_suspended_processes": S("SuspendedProcesses", default=[])
        >> ForallBend(AwsAutoScalingSuspendedProcess.mapping),
        "autoscaling_placement_group": S("PlacementGroup"),
        "autoscaling_vpc_zone_identifier": S("VPCZoneIdentifier"),
        "autoscaling_enabled_metrics": S("EnabledMetrics", default=[])
        >> ForallBend(AwsAutoScalingEnabledMetric.mapping),
        "autoscaling_status": S("Status"),
        "autoscaling_termination_policies": S("TerminationPolicies", default=[]),
        "autoscaling_new_instances_protected_from_scale_in": S("NewInstancesProtectedFromScaleIn"),
        "autoscaling_service_linked_role_arn": S("ServiceLinkedRoleARN"),
        "autoscaling_max_instance_lifetime": S("MaxInstanceLifetime"),
        "autoscaling_capacity_rebalance": S("CapacityRebalance"),
        "autoscaling_warm_pool_configuration": S("WarmPoolConfiguration")
        >> Bend(AwsAutoScalingWarmPoolConfiguration.mapping),
        "autoscaling_warm_pool_size": S("WarmPoolSize"),
        "autoscaling_context": S("Context"),
        "autoscaling_desired_capacity_type": S("DesiredCapacityType"),
        "autoscaling_default_instance_warmup": S("DefaultInstanceWarmup"),
    }
    autoscaling_launch_configuration_name: Optional[str] = field(default=None)
    autoscaling_launch_template: Optional[AwsAutoScalingLaunchTemplateSpecification] = field(default=None)
    autoscaling_mixed_instances_policy: Optional[AwsAutoScalingMixedInstancesPolicy] = field(default=None)
    autoscaling_predicted_capacity: Optional[int] = field(default=None)
    autoscaling_default_cooldown: Optional[int] = field(default=None)
    autoscaling_availability_zones: List[str] = field(factory=list)
    autoscaling_load_balancer_names: List[str] = field(factory=list)
    autoscaling_target_group_ar_ns: List[str] = field(factory=list)
    autoscaling_health_check_type: Optional[str] = field(default=None)
    autoscaling_health_check_grace_period: Optional[int] = field(default=None)
    autoscaling_instances: List[AwsAutoScalingInstance] = field(factory=list)
    autoscaling_suspended_processes: List[AwsAutoScalingSuspendedProcess] = field(factory=list)
    autoscaling_placement_group: Optional[str] = field(default=None)
    autoscaling_vpc_zone_identifier: Optional[str] = field(default=None)
    autoscaling_enabled_metrics: List[AwsAutoScalingEnabledMetric] = field(factory=list)
    autoscaling_status: Optional[str] = field(default=None)
    autoscaling_termination_policies: List[str] = field(factory=list)
    autoscaling_new_instances_protected_from_scale_in: Optional[bool] = field(default=None)
    autoscaling_service_linked_role_arn: Optional[str] = field(default=None)
    autoscaling_max_instance_lifetime: Optional[int] = field(default=None)
    autoscaling_capacity_rebalance: Optional[bool] = field(default=None)
    autoscaling_warm_pool_configuration: Optional[AwsAutoScalingWarmPoolConfiguration] = field(default=None)
    autoscaling_warm_pool_size: Optional[int] = field(default=None)
    autoscaling_context: Optional[str] = field(default=None)
    autoscaling_desired_capacity_type: Optional[str] = field(default=None)
    autoscaling_default_instance_warmup: Optional[int] = field(default=None)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Link this group to the EC2 instances it manages (dependant: deleted before the group)."""
        # NOTE(review): unlike other resource classes in this plugin, this does not call
        # super().connect_in_graph(builder, source) — confirm that is intentional.
        for instance in self.autoscaling_instances:
            builder.dependant_node(self, clazz=AwsEc2Instance, id=instance.instance_id)

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        """Create or overwrite the tag `key`; new instances do not inherit it (PropagateAtLaunch=False)."""
        client.call(
            aws_service=service_name,
            action="create-or-update-tags",
            result_name=None,
            Tags=[
                {
                    "ResourceId": self.name,
                    "ResourceType": "auto-scaling-group",
                    "Key": key,
                    "Value": value,
                    "PropagateAtLaunch": False,
                }
            ],
        )
        return True

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        """Remove the tag `key` from this group."""
        client.call(
            aws_service=service_name,
            action="delete-tags",
            result_name=None,
            Tags=[
                {
                    "ResourceId": self.name,
                    "ResourceType": "auto-scaling-group",
                    "Key": key,
                }
            ],
        )
        return True

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Force-delete the group, terminating its instances without waiting for scale-in."""
        client.call(
            aws_service=self.api_spec.service,
            action="delete-auto-scaling-group",
            result_name=None,
            AutoScalingGroupName=self.name,
            ForceDelete=True,
        )
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """APIs required to mutate tags or delete this resource."""
        return [
            AwsApiSpec(service_name, "create-or-update-tags"),
            AwsApiSpec(service_name, "delete-tags"),
            AwsApiSpec(service_name, "delete-auto-scaling-group"),
        ]
# All resource kinds exported by this module (extraction residue removed from this line).
resources: List[Type[AwsResource]] = [AwsAutoScalingGroup]
import re
from datetime import datetime, timedelta
from typing import ClassVar, Dict, List, Optional, Type, Tuple, TypeVar
from attr import define, field
from resoto_plugin_aws.aws_client import AwsClient
from resoto_plugin_aws.resource.base import AwsApiSpec, AwsResource, GraphBuilder
from resoto_plugin_aws.resource.kms import AwsKmsKey
from resoto_plugin_aws.utils import ToDict, MetricNormalization
from resotolib.baseresources import ModelReference, BaseResource
from resotolib.graph import Graph
from resotolib.json import from_json
from resotolib.json_bender import S, Bend, Bender, ForallBend, bend, F, SecondsFromEpochToDatetime
from resotolib.types import Json
from resotolib.utils import chunks
service_name = "cloudwatch"
# noinspection PyUnresolvedReferences
class CloudwatchTaggable:
    """Mixin providing tag mutation via the cloudwatch tag-resource/untag-resource API."""

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        """Create or overwrite the tag `key` on this resource."""
        tag = {"Key": key, "Value": value}
        client.call(
            aws_service=service_name,
            action="tag-resource",
            result_name=None,
            ResourceARN=self.arn,  # type: ignore
            Tags=[tag],
        )
        return True

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        """Remove the tag `key` from this resource."""
        client.call(
            aws_service=service_name,
            action="untag-resource",
            result_name=None,
            ResourceARN=self.arn,  # type: ignore
            TagKeys=[key],
        )
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """APIs required to mutate tags on this resource."""
        return [AwsApiSpec(service_name, action) for action in ("tag-resource", "untag-resource")]
# noinspection PyUnresolvedReferences
class LogsTaggable:
    """Mixin providing tag mutation via the logs tag-resource/untag-resource API."""

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        """Create or overwrite the tag `key`; returns False if the resource has no ARN."""
        arn = self.arn  # type: ignore
        if not arn:
            return False
        # log group ARNs may carry a trailing ":*" which the tagging API does not accept
        if arn.endswith(":*"):
            arn = arn[:-2]
        client.call(
            aws_service="logs",
            action="tag-resource",
            result_name=None,
            resourceArn=arn,
            tags={key: value},
        )
        return True

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        """Remove the tag `key`; returns False if the resource has no ARN."""
        arn = self.arn  # type: ignore
        if not arn:
            return False
        if arn.endswith(":*"):
            arn = arn[:-2]
        client.call(
            aws_service="logs",
            action="untag-resource",
            result_name=None,
            resourceArn=arn,
            tagKeys=[key],
        )
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """APIs required to mutate tags on this resource."""
        return [AwsApiSpec("logs", action) for action in ("tag-resource", "untag-resource")]
@define(eq=False, slots=False)
class AwsCloudwatchDimension:
    """A CloudWatch metric dimension (name/value pair)."""

    kind: ClassVar[str] = "aws_cloudwatch_dimension"
    mapping: ClassVar[Dict[str, Bender]] = {"name": S("Name"), "value": S("Value")}
    name: Optional[str] = field(default=None)
    value: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsCloudwatchMetric:
    """A CloudWatch metric identified by namespace, name and dimensions."""

    kind: ClassVar[str] = "aws_cloudwatch_metric"
    mapping: ClassVar[Dict[str, Bender]] = {
        "namespace": S("Namespace"),
        "metric_name": S("MetricName"),
        "dimensions": S("Dimensions", default=[]) >> ForallBend(AwsCloudwatchDimension.mapping),
    }
    namespace: Optional[str] = field(default=None)
    metric_name: Optional[str] = field(default=None)
    dimensions: List[AwsCloudwatchDimension] = field(factory=list)
@define(eq=False, slots=False)
class AwsCloudwatchMetricStat:
    """A metric plus the statistic to apply to it (period, stat, unit)."""

    kind: ClassVar[str] = "aws_cloudwatch_metric_stat"
    mapping: ClassVar[Dict[str, Bender]] = {
        "metric": S("Metric") >> Bend(AwsCloudwatchMetric.mapping),
        "period": S("Period"),
        "stat": S("Stat"),
        "unit": S("Unit"),
    }
    metric: Optional[AwsCloudwatchMetric] = field(default=None)
    period: Optional[int] = field(default=None)
    stat: Optional[str] = field(default=None)
    unit: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsCloudwatchMetricDataQuery:
    """One query of a metric-math alarm: either a metric stat or an expression over other queries."""

    kind: ClassVar[str] = "aws_cloudwatch_metric_data_query"
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("Id"),
        "metric_stat": S("MetricStat") >> Bend(AwsCloudwatchMetricStat.mapping),
        "expression": S("Expression"),
        "label": S("Label"),
        "return_data": S("ReturnData"),
        "period": S("Period"),
        "account_id": S("AccountId"),
    }
    id: Optional[str] = field(default=None)
    metric_stat: Optional[AwsCloudwatchMetricStat] = field(default=None)
    expression: Optional[str] = field(default=None)
    label: Optional[str] = field(default=None)
    return_data: Optional[bool] = field(default=None)
    period: Optional[int] = field(default=None)
    account_id: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsCloudwatchAlarm(CloudwatchTaggable, AwsResource):
    """A CloudWatch metric alarm (cloudwatch describe-alarms)."""

    kind: ClassVar[str] = "aws_cloudwatch_alarm"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "describe-alarms", "MetricAlarms")
    reference_kinds: ClassVar[ModelReference] = {
        "predecessors": {"default": ["aws_ec2_instance"], "delete": ["aws_ec2_instance"]},
    }
    # Benders mapping the PascalCase AWS API JSON keys onto this resource's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("AlarmName"),
        "name": S("AlarmName"),
        "mtime": S("AlarmConfigurationUpdatedTimestamp"),
        "arn": S("AlarmArn"),
        "cloudwatch_alarm_description": S("AlarmDescription"),
        "cloudwatch_actions_enabled": S("ActionsEnabled"),
        "cloudwatch_ok_actions": S("OKActions", default=[]),
        "cloudwatch_alarm_actions": S("AlarmActions", default=[]),
        "cloudwatch_insufficient_data_actions": S("InsufficientDataActions", default=[]),
        "cloudwatch_state_value": S("StateValue"),
        "cloudwatch_state_reason": S("StateReason"),
        "cloudwatch_state_reason_data": S("StateReasonData"),
        "cloudwatch_state_updated_timestamp": S("StateUpdatedTimestamp"),
        "cloudwatch_metric_name": S("MetricName"),
        "cloudwatch_namespace": S("Namespace"),
        "cloudwatch_statistic": S("Statistic"),
        "cloudwatch_extended_statistic": S("ExtendedStatistic"),
        "cloudwatch_dimensions": S("Dimensions", default=[]) >> ForallBend(AwsCloudwatchDimension.mapping),
        "cloudwatch_period": S("Period"),
        "cloudwatch_unit": S("Unit"),
        "cloudwatch_evaluation_periods": S("EvaluationPeriods"),
        "cloudwatch_datapoints_to_alarm": S("DatapointsToAlarm"),
        "cloudwatch_threshold": S("Threshold"),
        "cloudwatch_comparison_operator": S("ComparisonOperator"),
        "cloudwatch_treat_missing_data": S("TreatMissingData"),
        "cloudwatch_evaluate_low_sample_count_percentile": S("EvaluateLowSampleCountPercentile"),
        "cloudwatch_metrics": S("Metrics", default=[]) >> ForallBend(AwsCloudwatchMetricDataQuery.mapping),
        "cloudwatch_threshold_metric_id": S("ThresholdMetricId"),
    }
    arn: Optional[str] = field(default=None)
    cloudwatch_alarm_description: Optional[str] = field(default=None)
    cloudwatch_actions_enabled: Optional[bool] = field(default=None)
    cloudwatch_ok_actions: List[str] = field(factory=list)
    cloudwatch_alarm_actions: List[str] = field(factory=list)
    cloudwatch_insufficient_data_actions: List[str] = field(factory=list)
    cloudwatch_state_value: Optional[str] = field(default=None)
    cloudwatch_state_reason: Optional[str] = field(default=None)
    cloudwatch_state_reason_data: Optional[str] = field(default=None)
    cloudwatch_state_updated_timestamp: Optional[datetime] = field(default=None)
    cloudwatch_metric_name: Optional[str] = field(default=None)
    cloudwatch_namespace: Optional[str] = field(default=None)
    cloudwatch_statistic: Optional[str] = field(default=None)
    cloudwatch_extended_statistic: Optional[str] = field(default=None)
    cloudwatch_dimensions: List[AwsCloudwatchDimension] = field(factory=list)
    cloudwatch_period: Optional[int] = field(default=None)
    cloudwatch_unit: Optional[str] = field(default=None)
    cloudwatch_evaluation_periods: Optional[int] = field(default=None)
    cloudwatch_datapoints_to_alarm: Optional[int] = field(default=None)
    cloudwatch_threshold: Optional[float] = field(default=None)
    cloudwatch_comparison_operator: Optional[str] = field(default=None)
    cloudwatch_treat_missing_data: Optional[str] = field(default=None)
    cloudwatch_evaluate_low_sample_count_percentile: Optional[str] = field(default=None)
    cloudwatch_metrics: List[AwsCloudwatchMetricDataQuery] = field(factory=list)
    cloudwatch_threshold_metric_id: Optional[str] = field(default=None)

    @classmethod
    def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None:
        """Add one alarm node per API entry and fetch its tags asynchronously."""

        def add_tags(alarm: AwsCloudwatchAlarm) -> None:
            # tags require a separate list-tags-for-resource call per alarm
            tags = builder.client.list(service_name, "list-tags-for-resource", "Tags", ResourceARN=alarm.arn)
            if tags:
                alarm.tags = bend(ToDict(), tags)

        for js in json:
            if instance := cls.from_api(js, builder):
                builder.add_node(instance, js)
                builder.submit_work(service_name, add_tags, instance)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect the alarm to the EC2 instances referenced in its dimension values."""
        super().connect_in_graph(builder, source)
        # assumes dimension values hold instance ids — holds for the ec2 namespace; TODO confirm for others
        for dimension in self.cloudwatch_dimensions:
            builder.dependant_node(
                self, reverse=True, delete_same_as_default=True, kind="aws_ec2_instance", id=dimension.value
            )

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete this alarm by name."""
        client.call(aws_service=self.api_spec.service, action="delete-alarms", result_name=None, AlarmNames=[self.name])
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """Tagging APIs from the mixin plus delete-alarms."""
        return super().called_mutator_apis() + [AwsApiSpec(service_name, "delete-alarms")]
@define(eq=False, slots=False)
class AwsCloudwatchLogGroup(LogsTaggable, AwsResource):
    """A CloudWatch Logs log group (logs describe-log-groups)."""

    kind: ClassVar[str] = "aws_cloudwatch_log_group"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec("logs", "describe-log-groups", "logGroups")
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {"default": ["aws_kms_key"]},
        "predecessors": {"delete": ["aws_kms_key"]},
    }
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("logGroupName"),
        # NOTE(review): the logs API uses camelCase keys; a "Tags" key is not part of the
        # describe-log-groups response, so this bend presumably always yields {} — confirm.
        "tags": S("Tags", default=[]) >> ToDict(),
        "name": S("logGroupName"),
        # creationTime is epoch milliseconds; convert to seconds before the datetime bend
        "ctime": S("creationTime") >> F(lambda x: x // 1000) >> SecondsFromEpochToDatetime(),
        "arn": S("arn"),
        "group_retention_in_days": S("retentionInDays"),
        "group_metric_filter_count": S("metricFilterCount"),
        "group_stored_bytes": S("storedBytes"),
        "group_data_protection_status": S("dataProtectionStatus"),
    }
    group_retention_in_days: Optional[int] = field(default=None)
    group_metric_filter_count: Optional[int] = field(default=None)
    group_stored_bytes: Optional[int] = field(default=None)
    group_data_protection_status: Optional[str] = field(default=None)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect the log group to the KMS key that encrypts it, if any."""
        # NOTE(review): does not call super().connect_in_graph — confirm that is intentional.
        if kms_key_id := source.get("kmsKeyId"):
            builder.dependant_node(self, clazz=AwsKmsKey, id=AwsKmsKey.normalise_id(kms_key_id))

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """Tagging APIs from the mixin plus delete-log-group."""
        return super().called_mutator_apis() + [AwsApiSpec("logs", "delete-log-group")]

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete this log group by name."""
        client.call(aws_service="logs", action="delete-log-group", logGroupName=self.name)
        return True
@define(eq=False, slots=False)
class AwsCloudwatchMetricTransformation:
    """How a metric filter turns matching log events into a metric value."""

    kind: ClassVar[str] = "aws_cloudwatch_metric_transformation"
    mapping: ClassVar[Dict[str, Bender]] = {
        "metric_name": S("metricName"),
        "metric_namespace": S("metricNamespace"),
        "metric_value": S("metricValue"),
        "default_value": S("defaultValue"),
        "dimensions": S("dimensions"),
        "unit": S("unit"),
    }
    metric_name: Optional[str] = field(default=None)  # name of the published metric
    metric_namespace: Optional[str] = field(default=None)  # namespace the metric is published to
    metric_value: Optional[str] = field(default=None)  # value expression published for matching events
    default_value: Optional[float] = field(default=None)  # mapped from defaultValue
    dimensions: Optional[Dict[str, str]] = field(default=None)  # dimensions attached to the metric
    unit: Optional[str] = field(default=None)  # unit of the metric
@define(eq=False, slots=False)
class AwsCloudwatchMetricFilter(AwsResource):
    """A CloudWatch Logs metric filter, collected via logs:DescribeMetricFilters.

    A metric filter belongs to a log group and publishes metrics that in turn
    may be watched by alarms - both relations are expressed as edges.
    """

    kind: ClassVar[str] = "aws_cloudwatch_metric_filter"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec("logs", "describe-metric-filters", "metricFilters")
    reference_kinds: ClassVar[ModelReference] = {
        "predecessors": {"default": ["aws_cloudwatch_log_group"]},
        "successors": {"default": ["aws_cloudwatch_alarm"], "delete": ["aws_cloudwatch_log_group"]},
    }
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("filterName"),
        "name": S("filterName"),
        # creationTime is delivered in milliseconds since epoch
        "ctime": S("creationTime") >> F(lambda x: x // 1000) >> SecondsFromEpochToDatetime(),
        "filter_pattern": S("filterPattern"),
        "filter_transformations": S("metricTransformations", default=[])
        >> ForallBend(AwsCloudwatchMetricTransformation.mapping),
    }
    filter_pattern: Optional[str] = field(default=None)  # pattern matched against log events
    filter_transformations: List[AwsCloudwatchMetricTransformation] = field(factory=list)  # metrics published

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect this filter to its log group and to alarms watching the published metrics."""
        if log_group_name := source.get("logGroupName"):
            builder.dependant_node(self, reverse=True, clazz=AwsCloudwatchLogGroup, name=log_group_name)
        for transformation in self.filter_transformations:
            # every metric can be used by multiple alarms
            for alarm in builder.nodes(
                clazz=AwsCloudwatchAlarm,
                cloudwatch_namespace=transformation.metric_namespace,
                cloudwatch_metric_name=transformation.metric_name,
            ):
                builder.add_edge(self, node=alarm)

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """APIs required to delete a metric filter, in addition to those of the base class."""
        # delete-metric-filter is a CloudWatch *Logs* action: advertise it under the same
        # service that delete_resource actually calls (self.api_spec.service == "logs"),
        # not under the module-level cloudwatch service_name as before.
        return super().called_mutator_apis() + [AwsApiSpec("logs", "delete-metric-filter")]

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete the metric filter; requires the owning log group to be known in the graph."""
        if log_group := graph.search_first_parent_class(self, AwsCloudwatchLogGroup):
            client.call(
                aws_service=self.api_spec.service,
                action="delete-metric-filter",
                logGroupName=log_group.name,
                filterName=self.name,
            )
            return True
        return False
@define(hash=True, frozen=True)
class AwsCloudwatchQuery:
    """Definition of a single CloudWatch metric data query.

    Immutable and hashable, so instances can be used as dictionary keys
    when correlating queries with their results.
    """

    metric_name: str  # name of the metric to query
    namespace: str  # CloudWatch namespace of the metric
    dimensions: Tuple[Tuple[str, str], ...]  # (name, value) pairs identifying the metric
    period: timedelta  # aggregation period
    ref_id: str  # id of the resource this query refers to
    metric_id: str  # id of this query, unique within one get-metric-data call
    stat: str = "Sum"  # statistic to compute
    unit: str = "Count"  # unit of the result

    def to_json(self) -> Json:
        """Render this query in the MetricDataQuery structure expected by get-metric-data."""
        return {
            "Id": self.metric_id,
            "MetricStat": {
                "Metric": {
                    "Namespace": self.namespace,
                    "MetricName": self.metric_name,
                    "Dimensions": [{"Name": k, "Value": v} for k, v in self.dimensions],
                },
                # Period is expressed in whole seconds. The former int((seconds / 60) * 60)
                # performed no rounding at all and only risked losing precision to the float
                # round trip, so take the integral number of seconds directly.
                # Callers are expected to pass whole-minute periods - TODO confirm.
                "Period": int(self.period.total_seconds()),
                "Stat": self.stat,
                "Unit": self.unit,
            },
            "ReturnData": True,
        }

    @staticmethod
    def create(
        metric_name: str,
        namespace: str,
        period: timedelta,
        ref_id: str,
        metric_id: Optional[str] = None,
        stat: str = "Sum",
        unit: str = "Count",
        **dimensions: str,
    ) -> "AwsCloudwatchQuery":
        """Build a query, deriving a stable metric id if none is given.

        The derived id combines metric name, namespace, dimensions and statistic,
        with every non-word character replaced by an underscore.
        """
        dims = "_".join(f"{k}+{v}" for k, v in dimensions.items())
        rid = metric_id or re.sub(r"\W", "_", f"{metric_name}-{namespace}-{dims}-{stat}".lower())
        # noinspection PyTypeChecker
        return AwsCloudwatchQuery(
            metric_name=metric_name,
            namespace=namespace,
            period=period,
            dimensions=tuple(dimensions.items()),
            ref_id=ref_id,
            metric_id=rid,
            stat=stat,
            unit=unit,
        )
@define(eq=False, slots=False)
class AwsCloudwatchMessageData:
    """A message attached to a get-metric-data result."""

    mapping: ClassVar[Dict[str, Bender]] = {"code": S("Code"), "value": S("Value")}
    code: Optional[str] = field(default=None)  # message code
    value: Optional[str] = field(default=None)  # message text
@define(eq=False, slots=False)
class AwsCloudwatchMetricData:
    """Result of a single query within a get-metric-data response."""

    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("Id"),
        "label": S("Label"),
        "metric_timestamps": S("Timestamps", default=[]),
        "metric_values": S("Values", default=[]),
        "metric_status_code": S("StatusCode"),
        "metric_messages": S("Messages", default=[]) >> ForallBend(AwsCloudwatchMessageData.mapping),
    }
    id: Optional[str] = field(default=None)  # id of the query this result belongs to
    label: Optional[str] = field(default=None)  # label of the result
    metric_timestamps: List[datetime] = field(factory=list)  # timestamps, index-aligned with metric_values
    metric_values: List[float] = field(factory=list)  # data points, index-aligned with metric_timestamps
    metric_status_code: Optional[str] = field(default=None)  # status code of this result
    metric_messages: List[AwsCloudwatchMessageData] = field(factory=list)  # messages attached to this result

    def first_non_zero(self) -> Optional[Tuple[datetime, float]]:
        """Return the first (timestamp, value) pair with a non-zero value, or None if there is none."""
        for timestamp, value in zip(self.metric_timestamps, self.metric_values):
            if value != 0:
                return timestamp, value
        return None

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        """APIs required to fetch metric data."""
        return [AwsApiSpec(service_name, "get-metric-data")]

    @staticmethod
    def query_for(
        client: AwsClient,
        queries: List[AwsCloudwatchQuery],
        start_time: datetime,
        end_time: datetime,
        scan_desc: bool = True,
    ) -> "Dict[AwsCloudwatchQuery, AwsCloudwatchMetricData]":
        """Execute the given queries and return the metric data per query.

        :param client: the AWS client to use.
        :param queries: the metric queries to execute.
        :param start_time: start of the time range to query.
        :param end_time: end of the time range to query.
        :param scan_desc: if True, data points are returned newest first.
        :return: mapping from query to its data; queries without a result id are absent.
        """
        lookup = {q.metric_id: q for q in queries}
        result: Dict[AwsCloudwatchQuery, AwsCloudwatchMetricData] = {}
        # the api only allows for up to 500 metrics at once
        # NOTE(review): chunk size is 499, one below the stated limit of 500 - confirm intent
        for chunk in chunks(queries, 499):
            part = client.list(
                service_name,
                "get-metric-data",
                "MetricDataResults",
                MetricDataQueries=[a.to_json() for a in chunk],
                StartTime=start_time,
                EndTime=end_time,
                ScanBy="TimestampDescending" if scan_desc else "TimestampAscending",
            )
            for single in part:
                metric = from_json(bend(AwsCloudwatchMetricData.mapping, single), AwsCloudwatchMetricData)
                if metric.id:
                    result[lookup[metric.id]] = metric
        return result
# all resource kinds collected by this module
resources: List[Type[AwsResource]] = [AwsCloudwatchAlarm, AwsCloudwatchLogGroup, AwsCloudwatchMetricFilter]
# any concrete resource type whose usage metrics can be updated
V = TypeVar("V", bound=BaseResource)
def update_resource_metrics(
    resources_map: Dict[str, V],
    cloudwatch_result: Dict[AwsCloudwatchQuery, AwsCloudwatchMetricData],
    metric_normalizers: Dict[str, MetricNormalization],
) -> None:
    """Write queried metric values into the usage section of the matching resources.

    For every query result: look up the resource via the query's ref_id, take the
    first reported value, normalize it with the normalizer registered for the
    metric name, and store it under the normalized name and mapped statistic.
    Results without a matching resource, value, or normalizer are skipped.
    """
    for query, metric in cloudwatch_result.items():
        resource = resources_map.get(query.ref_id)
        if resource is None:
            continue
        metric_value = next(iter(metric.metric_values), None)
        if metric_value is None:
            continue
        normalizer = metric_normalizers.get(query.metric_name)
        if not normalizer:
            continue
        # reuse the normalizer fetched above instead of a redundant second dict lookup
        value = normalizer.normalize_value(metric_value)
        resource._resource_usage[normalizer.name][normalizer.stat_map[query.stat]] = value
from typing import ClassVar, Dict, List, Optional, Type
from attrs import define, field
from resoto_plugin_aws.aws_client import AwsClient
from resoto_plugin_aws.resource.base import AwsApiSpec, AwsResource, GraphBuilder
from resoto_plugin_aws.resource.kms import AwsKmsKey
from resoto_plugin_aws.resource.sns import AwsSnsTopic
from resotolib.baseresources import EdgeType, ModelReference
from resotolib.graph import Graph
from resotolib.json_bender import S, Bend, Bender, ForallBend
from resotolib.types import Json
service_name = "glacier"
@define(eq=False, slots=False)
class AwsGlacierInventoryRetrievalParameters:
    """Parameters of a glacier inventory retrieval job."""

    kind: ClassVar[str] = "aws_glacier_job_inventory_retrieval_parameters"
    mapping: ClassVar[Dict[str, Bender]] = {
        "output_format": S("Format"),
        "start_date": S("StartDate"),
        "end_date": S("EndDate"),
        "limit": S("Limit"),
    }
    output_format: Optional[str] = field(default=None)  # format of the inventory output
    start_date: Optional[str] = field(default=None)  # start of the requested date range
    end_date: Optional[str] = field(default=None)  # end of the requested date range
    limit: Optional[str] = field(default=None)  # maximum number of inventory items
@define(eq=False, slots=False)
class AwsGlacierSelectParameters:
    """Parameters of a glacier select (query) job."""

    kind: ClassVar[str] = "aws_glacier_job_select_parameters"
    mapping: ClassVar[Dict[str, Bender]] = {
        "input_serialization": S("InputSerialization"),
        "expression_type": S("ExpressionType"),
        "expression": S("Expression"),
        "output_serialization": S("OutputSerialization"),
    }
    input_serialization: Optional[Dict[str, Dict[str, str]]] = field(default=None)  # how the input is serialized
    expression_type: Optional[str] = field(default=None)  # type of the select expression
    expression: Optional[str] = field(default=None)  # the select expression itself
    output_serialization: Optional[Dict[str, Dict[str, str]]] = field(default=None)  # how the output is serialized
@define(eq=False, slots=False)
class AwsGlacierBucketEncryption:
    """Encryption settings of a glacier job's S3 output."""

    kind: ClassVar[str] = "aws_glacier_bucket_encryption"
    mapping: ClassVar[Dict[str, Bender]] = {
        "encryption_type": S("EncryptionType"),
        "kms_key_id": S("KMSKeyId"),
        "kms_context": S("KMSContext"),
    }
    encryption_type: Optional[str] = field(default=None)  # type of encryption in use
    kms_key_id: Optional[str] = field(default=None)  # id of the KMS key, if KMS encryption is used
    kms_context: Optional[str] = field(default=None)  # optional KMS encryption context
@define(eq=False, slots=False)
class AwsGlacierAcl:
    """A single grant (grantee plus permission) in an access control list."""

    kind: ClassVar[str] = "aws_glacier_acl"
    mapping: ClassVar[Dict[str, Bender]] = {
        "grantee": S("Grantee"),
        "permission": S("Permission"),
    }
    grantee: Optional[Dict[str, str]] = field(default=None)  # the grantee the permission applies to
    permission: Optional[str] = field(default=None)  # the granted permission
@define(eq=False, slots=False)
class AwsGlacierJobBucket:
    """S3 destination settings of a glacier job's output."""

    kind: ClassVar[str] = "aws_glacier_job_bucket"
    mapping: ClassVar[Dict[str, Bender]] = {
        "bucket_name": S("BucketName"),
        "prefix": S("Prefix"),
        "encryption": S("Encryption") >> Bend(AwsGlacierBucketEncryption.mapping),
        "canned_acl": S("CannedACL"),
        "access_control_list": S("AccessControlList") >> ForallBend(AwsGlacierAcl.mapping),
        "tagging": S("Tagging"),
        "user_metadata": S("UserMetadata"),
        "storage_class": S("StorageClass"),
    }
    bucket_name: Optional[str] = field(default=None)  # name of the target bucket
    prefix: Optional[str] = field(default=None)  # key prefix of the output objects
    encryption: Optional[AwsGlacierBucketEncryption] = field(default=None)  # encryption of the output
    # the mapping bent CannedACL into canned_acl, but the attribute was missing,
    # so the value was silently dropped - add it so the mapped data has a place to land
    canned_acl: Optional[str] = field(default=None)
    access_control_list: Optional[List[AwsGlacierAcl]] = field(default=None)  # explicit ACL grants
    tagging: Optional[Dict[str, str]] = field(default=None)  # tags applied to the output objects
    user_metadata: Optional[Dict[str, str]] = field(default=None)  # user defined metadata of the output
    storage_class: Optional[str] = field(default=None)  # storage class of the output objects
@define(eq=False, slots=False)
class AwsGlacierJobOutputLocation:
    """Where the output of a glacier job is written (currently only S3)."""

    kind: ClassVar[str] = "aws_glacier_job_output_location"
    mapping: ClassVar[Dict[str, Bender]] = {
        "s3": S("S3") >> Bend(AwsGlacierJobBucket.mapping),
    }
    s3: Optional[AwsGlacierJobBucket] = field(default=None)  # S3 destination of the job output
@define(eq=False, slots=False)
class AwsGlacierJob(AwsResource):
    """A glacier job (retrieval, inventory or select), collected per vault via list-jobs."""

    kind: ClassVar[str] = "aws_glacier_job"
    reference_kinds: ClassVar[ModelReference] = {
        "predecessors": {
            "delete": ["aws_kms_key"],
        },
        "successors": {"default": ["aws_kms_key"]},
    }
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("JobId"),
        "name": S("JobId"),
        "ctime": S("CreationDate"),
        # NOTE(review): "vault_arn" has no matching attribute in this class (only
        # glacier_job_vault_arn below) - confirm whether the base class defines it
        "vault_arn": S("VaultARN"),
        "description": S("JobDescription"),
        "glacier_job_action": S("Action"),
        "glacier_job_archive_id": S("ArchiveId"),
        "glacier_job_vault_arn": S("VaultARN"),
        "glacier_job_completed": S("Completed"),
        "glacier_job_status_code": S("StatusCode"),
        "glacier_job_status_message": S("StatusMessage"),
        "glacier_job_archive_size_in_bytes": S("ArchiveSizeInBytes"),
        "glacier_job_inventory_size_in_bytes": S("InventorySizeInBytes"),
        "glacier_job_sns_topic": S("SNSTopic"),
        "glacier_job_completion_date": S("CompletionDate"),
        "glacier_job_sha256_tree_hash": S("SHA256TreeHash"),
        "glacier_job_archive_sha256_tree_hash": S("ArchiveSHA256TreeHash"),
        "glacier_job_retrieval_byte_range": S("RetrievalByteRange"),
        "glacier_job_tier": S("Tier"),
        "glacier_job_inventory_retrieval_parameters": S("InventoryRetrievalParameters")
        >> Bend(AwsGlacierInventoryRetrievalParameters.mapping),
        "glacier_job_output_path": S("JobOutputPath"),
        "glacier_job_select_parameters": S("SelectParameters") >> Bend(AwsGlacierSelectParameters.mapping),
        "glacier_job_output_location": S("OutputLocation") >> Bend(AwsGlacierJobOutputLocation.mapping),
    }
    description: Optional[str] = field(default=None)  # description of the job
    glacier_job_action: Optional[str] = field(default=None)  # kind of job (mapped from Action)
    glacier_job_archive_id: Optional[str] = field(default=None)  # id of the archive the job refers to
    glacier_job_vault_arn: Optional[str] = field(default=None)  # ARN of the owning vault
    glacier_job_completed: Optional[bool] = field(default=None)  # whether the job has finished
    glacier_job_status_code: Optional[str] = field(default=None)  # status code of the job
    glacier_job_status_message: Optional[str] = field(default=None)  # human readable status message
    glacier_job_archive_size_in_bytes: Optional[int] = field(default=None)  # size of the archive
    glacier_job_inventory_size_in_bytes: Optional[int] = field(default=None)  # size of the inventory
    glacier_job_sns_topic: Optional[str] = field(default=None)  # SNS topic notified on completion
    glacier_job_completion_date: Optional[str] = field(default=None)  # when the job completed
    glacier_job_sha256_tree_hash: Optional[str] = field(default=None)  # tree hash of the job output
    glacier_job_archive_sha256_tree_hash: Optional[str] = field(default=None)  # tree hash of the archive
    glacier_job_retrieval_byte_range: Optional[str] = field(default=None)  # retrieved byte range
    glacier_job_tier: Optional[str] = field(default=None)  # retrieval tier
    glacier_job_inventory_retrieval_parameters: Optional[AwsGlacierInventoryRetrievalParameters] = field(default=None)
    glacier_job_output_path: Optional[str] = field(default=None)  # path of the job output
    glacier_job_select_parameters: Optional[AwsGlacierSelectParameters] = field(default=None)
    glacier_job_output_location: Optional[AwsGlacierJobOutputLocation] = field(default=None)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect this job to the KMS key encrypting its S3 output and to its SNS topic."""
        # noinspection PyUnboundLocalVariable
        if (o := self.glacier_job_output_location) and (s3 := o.s3) and (e := s3.encryption) and (kid := e.kms_key_id):
            builder.dependant_node(self, clazz=AwsKmsKey, id=AwsKmsKey.normalise_id(kid))
        if self.glacier_job_sns_topic:
            builder.add_edge(self, clazz=AwsSnsTopic, arn=self.glacier_job_sns_topic)

    @classmethod
    def service_name(cls) -> str:
        """Return the AWS service this resource belongs to."""
        return service_name
@define(eq=False, slots=False)
class AwsGlacierVault(AwsResource):
    """A glacier vault, collected via list-vaults; its jobs are collected alongside."""

    kind: ClassVar[str] = "aws_glacier_vault"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "list-vaults", "VaultList")
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["aws_glacier_job"],
        }
    }
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("VaultName"),
        "name": S("VaultName"),
        "ctime": S("CreationDate"),
        "arn": S("VaultARN"),
        "glacier_last_inventory_date": S("LastInventoryDate"),
        "glacier_number_of_archives": S("NumberOfArchives"),
        "glacier_size_in_bytes": S("SizeInBytes"),
    }
    glacier_last_inventory_date: Optional[str] = field(default=None)  # when the last inventory was taken
    glacier_number_of_archives: Optional[int] = field(default=None)  # number of archives in the vault
    glacier_size_in_bytes: Optional[int] = field(default=None)  # total size of the vault

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        """All APIs used during collection of vaults, their tags and their jobs."""
        return [
            cls.api_spec,
            AwsApiSpec(cls.api_spec.service, "list-tags-for-vault"),
            AwsApiSpec(cls.api_spec.service, "list-jobs"),
        ]

    @classmethod
    def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None:
        """Create vault nodes, fetch their tags in the background and collect their jobs."""

        def add_tags(vault: AwsGlacierVault) -> None:
            tags = builder.client.get(service_name, "list-tags-for-vault", "Tags", vaultName=vault.name)
            if tags:
                # the result is already a key/value mapping, no conversion is applied
                vault.tags = tags

        for vault in json:
            if vault_instance := cls.from_api(vault, builder):
                builder.add_node(vault_instance, vault)
                builder.submit_work(service_name, add_tags, vault_instance)
                # jobs are listed per vault and connected to their owning vault
                for job in builder.client.list(service_name, "list-jobs", "JobList", vaultName=vault_instance.name):
                    if job_instance := AwsGlacierJob.from_api(job, builder):
                        builder.add_node(job_instance, job)
                        builder.add_edge(vault_instance, EdgeType.default, node=job_instance)

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        """Add or update a single tag on the vault."""
        client.call(
            aws_service=service_name,
            action="add-tags-to-vault",
            result_name=None,
            vaultName=self.name,
            Tags={key: value},
        )
        return True

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        """Remove a single tag from the vault."""
        client.call(
            aws_service=service_name,
            action="remove-tags-from-vault",
            result_name=None,
            vaultName=self.name,
            TagKeys=[key],
        )
        return True

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete the vault; always reports success."""
        client.call(aws_service=service_name, action="delete-vault", result_name=None, vaultName=self.name)
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """All APIs used to mutate vaults (tagging and deletion)."""
        return [
            AwsApiSpec(service_name, "add-tags-to-vault"),
            AwsApiSpec(service_name, "remove-tags-from-vault"),
            AwsApiSpec(service_name, "delete-vault"),
        ]
resources: List[Type[AwsResource]] = [AwsGlacierVault, AwsGlacierJob]
from typing import ClassVar, Dict, Optional, List
from attrs import define, field
from resoto_plugin_aws.resource.base import AwsResource, GraphBuilder, AwsApiSpec
from resoto_plugin_aws.resource.kms import AwsKmsKey
from resotolib.baseresources import ModelReference
from resotolib.graph import Graph
from resotolib.json_bender import Bender, S, Bend, bend, ForallBend
from resotolib.types import Json
from resoto_plugin_aws.aws_client import AwsClient
from resoto_plugin_aws.utils import ToDict
from typing import Type
service_name = "kinesis"
@define(eq=False, slots=False)
class AwsKinesisHashKeyRange:
    """The hash key range covered by a shard."""

    kind: ClassVar[str] = "aws_kinesis_hash_key_range"
    mapping: ClassVar[Dict[str, Bender]] = {
        "starting_hash_key": S("StartingHashKey"),
        "ending_hash_key": S("EndingHashKey"),
    }
    starting_hash_key: Optional[str] = field(default=None)  # first hash key of the range
    ending_hash_key: Optional[str] = field(default=None)  # last hash key of the range
@define(eq=False, slots=False)
class AwsKinesisSequenceNumberRange:
    """The sequence number range covered by a shard."""

    kind: ClassVar[str] = "aws_kinesis_sequence_number_range"
    mapping: ClassVar[Dict[str, Bender]] = {
        "starting_sequence_number": S("StartingSequenceNumber"),
        "ending_sequence_number": S("EndingSequenceNumber"),
    }
    starting_sequence_number: Optional[str] = field(default=None)  # first sequence number of the range
    ending_sequence_number: Optional[str] = field(default=None)  # last sequence number of the range
@define(eq=False, slots=False)
class AwsKinesisShard:
    """A single shard of a kinesis stream."""

    kind: ClassVar[str] = "aws_kinesis_shard"
    mapping: ClassVar[Dict[str, Bender]] = {
        "shard_id": S("ShardId"),
        "parent_shard_id": S("ParentShardId"),
        "adjacent_parent_shard_id": S("AdjacentParentShardId"),
        "hash_key_range": S("HashKeyRange") >> Bend(AwsKinesisHashKeyRange.mapping),
        "sequence_number_range": S("SequenceNumberRange") >> Bend(AwsKinesisSequenceNumberRange.mapping),
    }
    shard_id: Optional[str] = field(default=None)  # id of this shard
    parent_shard_id: Optional[str] = field(default=None)  # id of the shard this one was split/merged from
    adjacent_parent_shard_id: Optional[str] = field(default=None)  # second parent, if created by a merge
    hash_key_range: Optional[AwsKinesisHashKeyRange] = field(default=None)  # hash key range of the shard
    sequence_number_range: Optional[AwsKinesisSequenceNumberRange] = field(default=None)  # sequence range
@define(eq=False, slots=False)
class AwsKinesisEnhancedMetrics:
    """Shard level metrics enabled for a stream."""

    kind: ClassVar[str] = "aws_kinesis_enhanced_metrics"
    mapping: ClassVar[Dict[str, Bender]] = {"shard_level_metrics": S("ShardLevelMetrics", default=[])}
    shard_level_metrics: List[str] = field(factory=list)  # names of the enabled shard level metrics
@define(eq=False, slots=False)
class AwsKinesisStream(AwsResource):
    """A kinesis data stream; names come from list-streams, details from describe-stream."""

    kind: ClassVar[str] = "aws_kinesis_stream"
    reference_kinds: ClassVar[ModelReference] = {
        "predecessors": {
            "delete": ["aws_kms_key"],
        },
        "successors": {"default": ["aws_kms_key"]},
    }
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "list-streams", "StreamNames")
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("StreamName"),
        "tags": S("Tags", default=[]) >> ToDict(),
        "name": S("StreamName"),
        "ctime": S("StreamCreationTimestamp"),
        "mtime": S("StreamCreationTimestamp"),
        "atime": S("StreamCreationTimestamp"),
        "arn": S("StreamARN"),
        "kinesis_stream_name": S("StreamName"),
        "kinesis_stream_status": S("StreamStatus"),
        "kinesis_stream_mode_details": S("StreamModeDetails", "StreamMode"),
        "kinesis_shards": S("Shards", default=[]) >> ForallBend(AwsKinesisShard.mapping),
        "kinesis_has_more_shards": S("HasMoreShards"),
        "kinesis_retention_period_hours": S("RetentionPeriodHours"),
        "kinesis_enhanced_monitoring": S("EnhancedMonitoring", default=[])
        >> ForallBend(AwsKinesisEnhancedMetrics.mapping),
        "kinesis_encryption_type": S("EncryptionType"),
        "kinesis_key_id": S("KeyId"),
    }
    kinesis_stream_status: Optional[str] = field(default=None)  # current status of the stream
    kinesis_stream_mode_details: Optional[str] = field(default=None)  # capacity mode of the stream
    kinesis_shards: List[AwsKinesisShard] = field(factory=list)  # shards of the stream
    kinesis_has_more_shards: Optional[bool] = field(default=None)  # whether the shard list is truncated
    kinesis_retention_period_hours: Optional[int] = field(default=None)  # record retention in hours
    kinesis_enhanced_monitoring: List[AwsKinesisEnhancedMetrics] = field(factory=list)  # enabled metrics
    kinesis_encryption_type: Optional[str] = field(default=None)  # type of server side encryption
    kinesis_key_id: Optional[str] = field(default=None)  # id of the encryption key, if encrypted

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        """All APIs used during collection of streams."""
        return [
            cls.api_spec,
            AwsApiSpec(service_name, "describe-stream"),
            AwsApiSpec(service_name, "list-tags-for-stream"),
        ]

    @classmethod
    def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None:
        """Describe every listed stream, add it as a node and fetch its tags in the background."""

        def add_tags(stream: AwsKinesisStream) -> None:
            tags = builder.client.list(stream.api_spec.service, "list-tags-for-stream", "Tags", StreamName=stream.name)
            if tags:
                stream.tags = bend(ToDict(), tags)

        for stream_name in json:
            # describe-stream is paginated: streams with many shards return one
            # StreamDescription per page. The previous len(...) == 1 check silently
            # dropped every stream whose description spanned multiple pages.
            # Use the first page instead - it carries the stream level attributes;
            # only the shard list may be incomplete (see kinesis_has_more_shards).
            stream_descriptions = builder.client.list(
                aws_service=service_name,
                action="describe-stream",
                result_name="StreamDescription",
                StreamName=stream_name,
            )
            if not stream_descriptions:
                continue
            if stream := AwsKinesisStream.from_api(stream_descriptions[0], builder):
                builder.add_node(stream)
                builder.submit_work(service_name, add_tags, stream)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect this stream to the KMS key used for encryption, if any."""
        if self.kinesis_key_id:
            builder.dependant_node(self, clazz=AwsKmsKey, id=AwsKmsKey.normalise_id(self.kinesis_key_id))

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        """Add or update a single tag on the stream."""
        client.call(
            aws_service=self.api_spec.service,
            action="add-tags-to-stream",
            result_name=None,
            StreamName=self.name,
            Tags={key: value},
        )
        return True

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        """Remove a single tag from the stream."""
        client.call(
            aws_service=self.api_spec.service,
            action="remove-tags-from-stream",
            result_name=None,
            StreamName=self.name,
            TagKeys=[key],
        )
        return True

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete the stream; always reports success."""
        client.call(
            aws_service=self.api_spec.service,
            action="delete-stream",
            result_name=None,
            StreamName=self.name,
        )
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """All APIs used to mutate streams (tagging and deletion)."""
        return [
            AwsApiSpec(service_name, "add-tags-to-stream"),
            AwsApiSpec(service_name, "remove-tags-from-stream"),
            AwsApiSpec(service_name, "delete-stream"),
        ]
resources: List[Type[AwsResource]] = [AwsKinesisStream]
import json
from datetime import datetime
from functools import lru_cache
from typing import Any, ClassVar, Dict, List, Optional
from attr import field, frozen
from botocore.loaders import Loader
from resoto_plugin_aws.aws_client import AwsClient
from resotolib.json import from_json
from resotolib.json_bender import Bend, Bender, F, ForallBend, MapDict, S, bend
from resotolib.types import Json
from resoto_plugin_aws.utils import arn_partition_by_region
service_name = "pricing"
# maps an EBS volume api name to the volumeType value used in the price list
EBS_TO_PRICING_NAMES = {
    "standard": "Magnetic",
    "gp2": "General Purpose",
    "gp3": "General Purpose",
    "io1": "Provisioned IOPS",
    "st1": "Throughput Optimized HDD",
    "sc1": "Cold HDD",
}
@lru_cache(maxsize=None)
def partition_index() -> Dict[str, int]:
    """Return a mapping from partition name to partition index.

    Each partition is recognized by a well known region it contains.
    If the endpoint data cannot be loaded, an empty mapping is returned
    (best effort).
    """
    # region that identifies each partition, tested in this order
    markers = (("aws", "us-east-1"), ("aws-us-gov", "us-gov-west-1"), ("aws-cn", "cn-north-1"))
    result: Dict[str, int] = {}
    try:
        endpoint_data = Loader().load_data("endpoints")
    except Exception:
        return result  # best effort: leave the mapping empty
    for position, part in enumerate(endpoint_data.get("partitions", [])):
        available_regions = part.get("regions", {}).keys()
        for partition_name, marker_region in markers:
            if marker_region in available_regions:
                result[partition_name] = position
                break
    return result
@lru_cache(maxsize=None)
def pricing_region(region: str) -> str:
    """Translate a region code (e.g. eu-central-1) into the location name used in the price list.

    :raises ValueError: if the region cannot be found in the endpoint data.
    """
    partition_pos = partition_index().get(arn_partition_by_region(region), 0)  # default to first partition
    endpoint_data = Loader().load_data("endpoints")
    description: Optional[str] = bend(S("partitions")[partition_pos] >> S("regions", region, "description"), endpoint_data)
    if description is None:
        raise ValueError(f"Unknown pricing region: {region}")
    # the price list names the continent "EU" where the endpoint data says "Europe"
    return description.replace("Europe", "EU")
@frozen(eq=False)
class AwsPricingProduct:
    """The product part of a price list entry."""

    kind: ClassVar[str] = "aws_pricing_product"
    mapping: ClassVar[Dict[str, Bender]] = {
        "product_family": S("productFamily"),
        "sku": S("sku"),
        "attributes": S("attributes"),
    }
    product_family: Optional[str] = None  # family the product belongs to
    attributes: Optional[Dict[str, str]] = field(default=None)  # raw product attributes
    sku: Optional[str] = None  # stock keeping unit of the product
@frozen(eq=False)
class AwsPricingPriceDimension:
    """A single price dimension (rate) within a pricing term."""

    kind: ClassVar[str] = "aws_pricing_price_dimension"
    mapping: ClassVar[Dict[str, Bender]] = {
        "unit": S("unit"),
        "end_range": S("endRange"),
        "description": S("description"),
        "applies_to": S("appliesTo"),
        "rate_code": S("rateCode"),
        "begin_range": S("beginRange"),
        # prices arrive as strings and are converted to float per currency
        "price_per_unit": S("pricePerUnit") >> MapDict(value_bender=F(float)),
    }
    unit: Optional[str] = None  # unit the price applies to
    end_range: Optional[str] = None  # upper bound of the usage range
    description: Optional[str] = None  # human readable description of the rate
    applies_to: List[Any] = field(factory=list)  # what this rate applies to
    rate_code: Optional[str] = None  # unique code of this rate
    begin_range: Optional[str] = None  # lower bound of the usage range
    price_per_unit: Dict[str, float] = field(factory=dict)  # price keyed by currency code
@frozen(eq=False)
class AwsPricingTerm:
    """A pricing term (e.g. OnDemand) with its price dimensions."""

    kind: ClassVar[str] = "aws_pricing_term"
    mapping: ClassVar[Dict[str, Bender]] = {
        "sku": S("sku"),
        "effective_date": S("effectiveDate"),
        "offer_term_code": S("offerTermCode"),
        "term_attributes": S("termAttributes"),
        # priceDimensions is keyed by rate code - only the values are of interest
        "price_dimensions": S("priceDimensions")
        >> F(lambda x: list(x.values()))
        >> ForallBend(AwsPricingPriceDimension.mapping),
    }
    sku: Optional[str] = None  # stock keeping unit this term belongs to
    effective_date: Optional[datetime] = None  # when this term becomes effective
    offer_term_code: Optional[str] = None  # code of the offer term
    term_attributes: Dict[str, str] = field(factory=dict)  # raw term attributes
    price_dimensions: List[AwsPricingPriceDimension] = field(factory=list)  # rates of this term
@frozen(eq=False)
class AwsPricingPrice:
    """A single entry of the AWS price list, fetched via the pricing get-products API."""

    kind: ClassVar[str] = "aws_pricing_price"
    mapping: ClassVar[Dict[str, Bender]] = {
        "product": S("product") >> Bend(AwsPricingProduct.mapping),
        "service_code": S("serviceCode"),
        # terms is a mapping of term type (e.g. OnDemand) to a rate-code-keyed dict;
        # only the values of the inner dict are kept
        "terms": S("terms")
        >> MapDict(
            value_bender=F(lambda x: list(x.values()) if isinstance(x, dict) else [])
            >> ForallBend(AwsPricingTerm.mapping)
        ),
    }
    product: Optional[AwsPricingProduct] = None  # the priced product
    service_code: Optional[str] = None  # code of the AWS service
    terms: Dict[str, List[AwsPricingTerm]] = field(factory=dict)  # terms keyed by term type

    @property
    def on_demand_price_usd(self) -> float:
        """USD price of the first OnDemand price dimension, or 0 if not available."""
        if terms := self.terms.get("OnDemand", []):
            if dim := terms[0].price_dimensions:
                return dim[0].price_per_unit.get("USD", 0)
        return 0

    @classmethod
    def single_price_for(
        cls, client: AwsClient, service_code: str, search_filter: List[Json]
    ) -> "Optional[AwsPricingPrice]":
        """Fetch at most one price for the given service and filter, or None if nothing matches."""
        # Prices are only available in the global region
        prices = client.global_region.list(
            service_name, "get-products", "PriceList", ServiceCode=service_code, Filters=search_filter, MaxResults=1
        )
        return from_json(bend(cls.mapping, json.loads(prices[0])), AwsPricingPrice) if prices else None

    @classmethod
    def volume_type_price(cls, client: AwsClient, volume_type: str, region: str) -> "Optional[AwsPricingPrice]":
        """Price of an EBS volume type in the given region, or None for unknown volume types."""
        if volume_type not in EBS_TO_PRICING_NAMES:
            return None
        search_filter = [
            {"Type": "TERM_MATCH", "Field": "volumeType", "Value": EBS_TO_PRICING_NAMES[volume_type]},
            {"Type": "TERM_MATCH", "Field": "volumeApiName", "Value": volume_type},
            {"Type": "TERM_MATCH", "Field": "location", "Value": pricing_region(region)},
        ]
        return cls.single_price_for(client, "AmazonEC2", search_filter)

    @classmethod
    def instance_type_price(cls, client: AwsClient, instance_type: str, region: str) -> "Optional[AwsPricingPrice]":
        """On-demand price of a Linux/shared-tenancy EC2 instance type in the given region."""
        search_filter = [
            {"Type": "TERM_MATCH", "Field": "operatingSystem", "Value": "Linux"},
            {"Type": "TERM_MATCH", "Field": "operation", "Value": "RunInstances"},
            {"Type": "TERM_MATCH", "Field": "capacitystatus", "Value": "Used"},
            {"Type": "TERM_MATCH", "Field": "tenancy", "Value": "Shared"},
            {"Type": "TERM_MATCH", "Field": "instanceType", "Value": instance_type},
            {"Type": "TERM_MATCH", "Field": "location", "Value": pricing_region(region)},
        ]
        return cls.single_price_for(client, "AmazonEC2", search_filter)
resources = [AwsPricingPrice]
from datetime import datetime
from typing import ClassVar, Dict, Optional, List, Any, Type
from attr import define, field
from resoto_plugin_azure.azure_client import AzureApiSpec
from resoto_plugin_azure.resource.base import AzureResource
from resotolib.json_bender import Bender, S, Bend, ForallBend, K
@define(eq=False, slots=False)
class AzureInstanceViewStatus:
    """A single status entry of an Azure resource instance view."""

    kind: ClassVar[str] = "azure_instance_view_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "code": S("code"),
        "display_status": S("displayStatus"),
        "level": S("level"),
        "message": S("message"),
        "time": S("time"),
    }
    code: Optional[str] = field(default=None, metadata={"description": "The status code."})
    display_status: Optional[str] = field(default=None, metadata={'description': 'The short localizable label for the status.'})  # fmt: skip
    level: Optional[str] = field(default=None, metadata={"description": "The level code."})
    message: Optional[str] = field(default=None, metadata={'description': 'The detailed status message, including for alerts and error messages.'})  # fmt: skip
    time: Optional[datetime] = field(default=None, metadata={"description": "The time of the status."})
@define(eq=False, slots=False)
class AzureSku:
    """SKU (name, tier, capacity) of an Azure compute resource."""

    kind: ClassVar[str] = "azure_sku"
    mapping: ClassVar[Dict[str, Bender]] = {"capacity": S("capacity"), "name": S("name"), "tier": S("tier")}
    capacity: Optional[int] = field(default=None, metadata={'description': 'Specifies the number of virtual machines in the scale set.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The sku name."})
    tier: Optional[str] = field(default=None, metadata={'description': 'Specifies the tier of virtual machines in a scale set. Possible values: **standard** **basic**.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureAvailabilitySet(AzureResource):
    """An Azure availability set, listed via the Microsoft.Compute availabilitySets API."""

    kind: ClassVar[str] = "azure_availability_set"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # no time information is mapped for this resource
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "platform_fault_domain_count": S("properties", "platformFaultDomainCount"),
        "platform_update_domain_count": S("properties", "platformUpdateDomainCount"),
        "proximity_placement_group": S("properties", "proximityPlacementGroup", "id"),
        "sku": S("sku") >> Bend(AzureSku.mapping),
        "statuses": S("properties", "statuses") >> ForallBend(AzureInstanceViewStatus.mapping),
        "virtual_machines_availability": S("properties") >> S("virtualMachines", default=[]) >> ForallBend(S("id")),
    }
    platform_fault_domain_count: Optional[int] = field(default=None, metadata={"description": "Fault domain count."})
    platform_update_domain_count: Optional[int] = field(default=None, metadata={"description": "Update domain count."})
    proximity_placement_group: Optional[str] = field(default=None, metadata={"description": ""})
    sku: Optional[AzureSku] = field(default=None, metadata={'description': 'Describes a virtual machine scale set sku. Note: if the new vm sku is not supported on the hardware the scale set is currently on, you need to deallocate the vms in the scale set before you modify the sku name.'})  # fmt: skip
    statuses: Optional[List[AzureInstanceViewStatus]] = field(default=None, metadata={'description': 'The resource status information.'})  # fmt: skip
    virtual_machines_availability: Optional[List[str]] = field(default=None, metadata={'description': 'A list of references to all virtual machines in the availability set.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureCapacityReservationGroupInstanceView:
    """Nested model: instance view of a capacity reservation group, reduced to
    the names of the contained capacity reservations."""

    kind: ClassVar[str] = "azure_capacity_reservation_group_instance_view"
    mapping: ClassVar[Dict[str, Bender]] = {
        "capacity_reservations": S("capacityReservations", default=[]) >> ForallBend(S("name"))
    }
    capacity_reservations: Optional[List[str]] = field(default=None, metadata={'description': 'List of instance view of the capacity reservations under the capacity reservation group.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureCapacityReservationGroup(AzureResource):
    """Azure capacity reservation group, listed subscription-wide via the
    Microsoft.Compute/capacityReservationGroups REST API (api-version 2023-03-01)."""

    kind: ClassVar[str] = "azure_capacity_reservation_group"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/capacityReservationGroups",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # this endpoint exposes no timestamps, so ctime/mtime/atime are always None
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "capacity_reservations": S("properties") >> S("capacityReservations", default=[]) >> ForallBend(S("id")),
        "reservation_group_instance_view": S("properties", "instanceView")
        >> Bend(AzureCapacityReservationGroupInstanceView.mapping),
        "virtual_machines_associated": S("properties")
        >> S("virtualMachinesAssociated", default=[])
        >> ForallBend(S("id")),
    }
    capacity_reservations: Optional[List[str]] = field(default=None, metadata={'description': 'A list of all capacity reservation resource ids that belong to capacity reservation group.'})  # fmt: skip
    reservation_group_instance_view: Optional[AzureCapacityReservationGroupInstanceView] = field(default=None, metadata={'description': ''})  # fmt: skip
    virtual_machines_associated: Optional[List[str]] = field(default=None, metadata={'description': 'A list of references to all virtual machines associated to the capacity reservation group.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureCloudServiceRoleSku:
    """Nested model: SKU (name/tier/capacity) of a cloud service role."""

    kind: ClassVar[str] = "azure_cloud_service_role_sku"
    mapping: ClassVar[Dict[str, Bender]] = {"capacity": S("capacity"), "name": S("name"), "tier": S("tier")}
    capacity: Optional[int] = field(default=None, metadata={'description': 'Specifies the number of role instances in the cloud service.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={'description': 'The sku name. Note: if the new sku is not supported on the hardware the cloud service is currently on, you need to delete and recreate the cloud service or move back to the old sku.'})  # fmt: skip
    tier: Optional[str] = field(default=None, metadata={'description': 'Specifies the tier of the cloud service. Possible values are **standard** **basic**.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureCloudServiceRoleProfileProperties:
    """Nested model: a single cloud service role (name plus its SKU)."""

    kind: ClassVar[str] = "azure_cloud_service_role_profile_properties"
    mapping: ClassVar[Dict[str, Bender]] = {
        "name": S("name"),
        "sku": S("sku") >> Bend(AzureCloudServiceRoleSku.mapping),
    }
    name: Optional[str] = field(default=None, metadata={"description": "Resource name."})
    sku: Optional[AzureCloudServiceRoleSku] = field(default=None, metadata={'description': 'Describes the cloud service role sku.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureCloudServiceRoleProfile:
    """Nested model: the role profile of a cloud service (list of roles)."""

    kind: ClassVar[str] = "azure_cloud_service_role_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "roles": S("roles") >> ForallBend(AzureCloudServiceRoleProfileProperties.mapping)
    }
    roles: Optional[List[AzureCloudServiceRoleProfileProperties]] = field(default=None, metadata={'description': 'List of roles for the cloud service.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureCloudServiceVaultSecretGroup:
    """Nested model: key vault reference plus the certificate URLs stored in it."""

    kind: ClassVar[str] = "azure_cloud_service_vault_secret_group"
    mapping: ClassVar[Dict[str, Bender]] = {
        # only the vault's resource id and the certificate urls are retained
        "source_vault": S("sourceVault", "id"),
        "vault_certificates": S("vaultCertificates", default=[]) >> ForallBend(S("certificateUrl")),
    }
    source_vault: Optional[str] = field(default=None, metadata={"description": ""})
    vault_certificates: Optional[List[str]] = field(default=None, metadata={'description': 'The list of key vault references in sourcevault which contain certificates.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureCloudServiceOsProfile:
    """Nested model: OS profile of a cloud service (certificate secret groups)."""

    kind: ClassVar[str] = "azure_cloud_service_os_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "secrets": S("secrets") >> ForallBend(AzureCloudServiceVaultSecretGroup.mapping)
    }
    secrets: Optional[List[AzureCloudServiceVaultSecretGroup]] = field(default=None, metadata={'description': 'Specifies set of certificates that should be installed onto the role instances.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureLoadBalancerFrontendIpConfiguration:
    """Nested model: a load balancer frontend IP configuration; public IP and
    subnet are reduced to their resource ids."""

    kind: ClassVar[str] = "azure_load_balancer_frontend_ip_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "name": S("name"),
        "private_ip_address": S("properties", "privateIPAddress"),
        "public_ip_address": S("properties", "publicIPAddress", "id"),
        "subnet": S("properties", "subnet", "id"),
    }
    name: Optional[str] = field(default=None, metadata={'description': 'The name of the resource that is unique within the set of frontend ip configurations used by the load balancer. This name can be used to access the resource.'})  # fmt: skip
    private_ip_address: Optional[str] = field(default=None, metadata={'description': 'The virtual network private ip address of the ip configuration.'})  # fmt: skip
    public_ip_address: Optional[str] = field(default=None, metadata={"description": ""})
    subnet: Optional[str] = field(default=None, metadata={"description": ""})
@define(eq=False, slots=False)
class AzureLoadBalancerConfiguration:
    """Nested model: one load balancer configuration of a cloud service."""

    kind: ClassVar[str] = "azure_load_balancer_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "frontend_ip_configurations": S("properties", "frontendIpConfigurations")
        >> ForallBend(AzureLoadBalancerFrontendIpConfiguration.mapping),
        "id": S("id"),
        "name": S("name"),
    }
    frontend_ip_configurations: Optional[List[AzureLoadBalancerFrontendIpConfiguration]] = field(default=None, metadata={'description': 'Specifies the frontend ip to be used for the load balancer. Only ipv4 frontend ip address is supported. Each load balancer configuration must have exactly one frontend ip configuration.'})  # fmt: skip
    id: Optional[str] = field(default=None, metadata={"description": "Resource id."})
    name: Optional[str] = field(default=None, metadata={"description": "The name of the load balancer."})
@define(eq=False, slots=False)
class AzureCloudServiceNetworkProfile:
    """Nested model: network profile of a cloud service (load balancers, slot
    type, and the id of a swappable cloud service)."""

    kind: ClassVar[str] = "azure_cloud_service_network_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "load_balancer_configurations": S("loadBalancerConfigurations")
        >> ForallBend(AzureLoadBalancerConfiguration.mapping),
        "slot_type": S("slotType"),
        "swappable_cloud_service": S("swappableCloudService", "id"),
    }
    load_balancer_configurations: Optional[List[AzureLoadBalancerConfiguration]] = field(default=None, metadata={'description': 'List of load balancer configurations. Cloud service can have up to two load balancer configurations, corresponding to a public load balancer and an internal load balancer.'})  # fmt: skip
    slot_type: Optional[str] = field(default=None, metadata={'description': 'Slot type for the cloud service. Possible values are **production** **staging** if not specified, the default value is production.'})  # fmt: skip
    swappable_cloud_service: Optional[str] = field(default=None, metadata={"description": ""})
@define(eq=False, slots=False)
class AzureCloudServiceVaultAndSecretReference:
    """Nested model: reference to a key vault secret (secret URL + vault id)."""

    kind: ClassVar[str] = "azure_cloud_service_vault_and_secret_reference"
    mapping: ClassVar[Dict[str, Bender]] = {"secret_url": S("secretUrl"), "source_vault": S("sourceVault", "id")}
    secret_url: Optional[str] = field(default=None, metadata={'description': 'Secret url which contains the protected settings of the extension.'})  # fmt: skip
    source_vault: Optional[str] = field(default=None, metadata={"description": ""})
@define(eq=False, slots=False)
class AzureExtension:
    """Nested model: a cloud service extension and its settings.

    NOTE: the ``type`` field shadows the ``type`` builtin; the name is kept to
    mirror the API property and the project's serialization contract.
    """

    kind: ClassVar[str] = "azure_extension"
    mapping: ClassVar[Dict[str, Bender]] = {
        "auto_upgrade_minor_version": S("properties", "autoUpgradeMinorVersion"),
        "force_update_tag": S("properties", "forceUpdateTag"),
        "name": S("name"),
        "protected_settings": S("properties", "protectedSettings"),
        "protected_settings_from_key_vault": S("properties", "protectedSettingsFromKeyVault")
        >> Bend(AzureCloudServiceVaultAndSecretReference.mapping),
        "provisioning_state": S("properties", "provisioningState"),
        "publisher": S("properties", "publisher"),
        "roles_applied_to": S("properties", "rolesAppliedTo"),
        "settings": S("properties", "settings"),
        "type": S("properties", "type"),
        "type_handler_version": S("properties", "typeHandlerVersion"),
    }
    auto_upgrade_minor_version: Optional[bool] = field(default=None, metadata={'description': 'Explicitly specify whether platform can automatically upgrade typehandlerversion to higher minor versions when they become available.'})  # fmt: skip
    force_update_tag: Optional[str] = field(default=None, metadata={'description': 'Tag to force apply the provided public and protected settings. Changing the tag value allows for re-running the extension without changing any of the public or protected settings. If forceupdatetag is not changed, updates to public or protected settings would still be applied by the handler. If neither forceupdatetag nor any of public or protected settings change, extension would flow to the role instance with the same sequence-number, and it is up to handler implementation whether to re-run it or not.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The name of the extension."})
    protected_settings: Optional[Any] = field(default=None, metadata={'description': 'Protected settings for the extension which are encrypted before sent to the role instance.'})  # fmt: skip
    protected_settings_from_key_vault: Optional[AzureCloudServiceVaultAndSecretReference] = field(default=None, metadata={'description': 'Protected settings for the extension, referenced using keyvault which are encrypted before sent to the role instance.'})  # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The provisioning state, which only appears in the response.'})  # fmt: skip
    publisher: Optional[str] = field(default=None, metadata={'description': 'The name of the extension handler publisher.'})  # fmt: skip
    roles_applied_to: Optional[List[str]] = field(default=None, metadata={'description': 'Optional list of roles to apply this extension. If property is not specified or * is specified, extension is applied to all roles in the cloud service.'})  # fmt: skip
    settings: Optional[Any] = field(default=None, metadata={'description': 'Public settings for the extension. For json extensions, this is the json settings for the extension. For xml extension (like rdp), this is the xml setting for the extension.'})  # fmt: skip
    type: Optional[str] = field(default=None, metadata={"description": "Specifies the type of the extension."})
    type_handler_version: Optional[str] = field(default=None, metadata={'description': 'Specifies the version of the extension. Specifies the version of the extension. If this element is not specified or an asterisk (*) is used as the value, the latest version of the extension is used. If the value is specified with a major version number and an asterisk as the minor version number (x. ), the latest minor version of the specified major version is selected. If a major version number and a minor version number are specified (x. Y), the specific extension version is selected. If a version is specified, an auto-upgrade is performed on the role instance.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureCloudServiceExtensionProfile:
    """Nested model: extension profile of a cloud service (list of extensions)."""

    kind: ClassVar[str] = "azure_cloud_service_extension_profile"
    mapping: ClassVar[Dict[str, Bender]] = {"extensions": S("extensions") >> ForallBend(AzureExtension.mapping)}
    extensions: Optional[List[AzureExtension]] = field(default=None, metadata={'description': 'List of extensions for the cloud service.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureSystemData:
    """Nested model: creation/modification timestamps of a resource."""

    kind: ClassVar[str] = "azure_system_data"
    mapping: ClassVar[Dict[str, Bender]] = {"created_at": S("createdAt"), "last_modified_at": S("lastModifiedAt")}
    created_at: Optional[datetime] = field(default=None, metadata={'description': 'Specifies the time in utc at which the cloud service (extended support) resource was created. Minimum api-version: 2022-04-04.'})  # fmt: skip
    last_modified_at: Optional[datetime] = field(default=None, metadata={'description': 'Specifies the time in utc at which the cloud service (extended support) resource was last modified. Minimum api-version: 2022-04-04.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureCloudService(AzureResource):
    """Azure cloud service (extended support), listed subscription-wide via the
    Microsoft.Compute/cloudServices REST API (api-version 2022-09-04)."""

    kind: ClassVar[str] = "azure_cloud_service"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2022-09-04",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/cloudServices",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # this endpoint exposes no top-level timestamps (system_data carries
        # them instead), so ctime/mtime/atime are always None
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "allow_model_override": S("properties", "allowModelOverride"),
        "configuration": S("properties", "configuration"),
        "configuration_url": S("properties", "configurationUrl"),
        "extension_profile": S("properties", "extensionProfile") >> Bend(AzureCloudServiceExtensionProfile.mapping),
        "network_profile": S("properties", "networkProfile") >> Bend(AzureCloudServiceNetworkProfile.mapping),
        "os_profile": S("properties", "osProfile") >> Bend(AzureCloudServiceOsProfile.mapping),
        "package_url": S("properties", "packageUrl"),
        "provisioning_state": S("properties", "provisioningState"),
        "role_profile": S("properties", "roleProfile") >> Bend(AzureCloudServiceRoleProfile.mapping),
        "start_cloud_service": S("properties", "startCloudService"),
        "system_data": S("systemData") >> Bend(AzureSystemData.mapping),
        "unique_id": S("properties", "uniqueId"),
        "upgrade_mode": S("properties", "upgradeMode"),
    }
    allow_model_override: Optional[bool] = field(default=None, metadata={'description': '(optional) indicates whether the role sku properties (roleprofile. Roles. Sku) specified in the model/template should override the role instance count and vm size specified in the. Cscfg and. Csdef respectively. The default value is `false`.'})  # fmt: skip
    configuration: Optional[str] = field(default=None, metadata={'description': 'Specifies the xml service configuration (. Cscfg) for the cloud service.'})  # fmt: skip
    configuration_url: Optional[str] = field(default=None, metadata={'description': 'Specifies a url that refers to the location of the service configuration in the blob service. The service package url can be shared access signature (sas) uri from any storage account. This is a write-only property and is not returned in get calls.'})  # fmt: skip
    extension_profile: Optional[AzureCloudServiceExtensionProfile] = field(default=None, metadata={'description': 'Describes a cloud service extension profile.'})  # fmt: skip
    network_profile: Optional[AzureCloudServiceNetworkProfile] = field(default=None, metadata={'description': 'Network profile for the cloud service.'})  # fmt: skip
    os_profile: Optional[AzureCloudServiceOsProfile] = field(default=None, metadata={'description': 'Describes the os profile for the cloud service.'})  # fmt: skip
    package_url: Optional[str] = field(default=None, metadata={'description': 'Specifies a url that refers to the location of the service package in the blob service. The service package url can be shared access signature (sas) uri from any storage account. This is a write-only property and is not returned in get calls.'})  # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The provisioning state, which only appears in the response.'})  # fmt: skip
    role_profile: Optional[AzureCloudServiceRoleProfile] = field(default=None, metadata={'description': 'Describes the role profile for the cloud service.'})  # fmt: skip
    start_cloud_service: Optional[bool] = field(default=None, metadata={'description': '(optional) indicates whether to start the cloud service immediately after it is created. The default value is `true`. If false, the service model is still deployed, but the code is not run immediately. Instead, the service is poweredoff until you call start, at which time the service will be started. A deployed service still incurs charges, even if it is poweredoff.'})  # fmt: skip
    system_data: Optional[AzureSystemData] = field(default=None, metadata={'description': 'The system meta data relating to this resource.'})  # fmt: skip
    unique_id: Optional[str] = field(default=None, metadata={'description': 'The unique identifier for the cloud service.'})  # fmt: skip
    upgrade_mode: Optional[str] = field(default=None, metadata={'description': 'Upgrade mode for the cloud service. Role instances are allocated to update domains when the service is deployed. Updates can be initiated manually in each update domain or initiated automatically in all update domains. Possible values are **auto** **manual** **simultaneous** if not specified, the default value is auto. If set to manual, put updatedomain must be called to apply the update. If set to auto, the update is automatically applied to each update domain in sequence.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureComputeOperationValueDisplay:
    """Nested model: human-readable display info of a compute operation."""

    kind: ClassVar[str] = "azure_compute_operation_value_display"
    mapping: ClassVar[Dict[str, Bender]] = {
        "description": S("description"),
        "operation": S("operation"),
        "provider": S("provider"),
        "resource": S("resource"),
    }
    description: Optional[str] = field(default=None, metadata={"description": "The description of the operation."})
    operation: Optional[str] = field(default=None, metadata={'description': 'The display name of the compute operation.'})  # fmt: skip
    provider: Optional[str] = field(default=None, metadata={"description": "The resource provider for the operation."})
    resource: Optional[str] = field(default=None, metadata={'description': 'The display name of the resource the operation applies to.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureComputeOperationValue(AzureResource):
    """A compute operation descriptor from the tenant-wide
    Microsoft.Compute/operations REST API (api-version 2023-03-01).

    Note the path has no subscription segment and the API returns no id,
    hence ``"id": K(None)`` in the mapping.
    """

    kind: ClassVar[str] = "azure_compute_operation_value"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/providers/Microsoft.Compute/operations",
        path_parameters=[],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": K(None),
        "tags": S("tags", default={}),
        "name": S("name"),
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "display": S("display") >> Bend(AzureComputeOperationValueDisplay.mapping),
        "origin": S("origin"),
    }
    display: Optional[AzureComputeOperationValueDisplay] = field(default=None, metadata={'description': 'Describes the properties of a compute operation value display.'})  # fmt: skip
    origin: Optional[str] = field(default=None, metadata={"description": "The origin of the compute operation."})
@define(eq=False, slots=False)
class AzureContainerServiceServicePrincipalProfile:
    """Nested model: service principal (client id + secret) of a container service."""

    kind: ClassVar[str] = "azure_container_service_service_principal_profile"
    mapping: ClassVar[Dict[str, Bender]] = {"client_id": S("clientId"), "secret": S("secret")}
    client_id: Optional[str] = field(default=None, metadata={"description": "The id for the service principal."})
    secret: Optional[str] = field(default=None, metadata={'description': 'The secret password associated with the service principal.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureContainerServiceMasterProfile:
    """Nested model: master node profile of a container service cluster."""

    kind: ClassVar[str] = "azure_container_service_master_profile"
    mapping: ClassVar[Dict[str, Bender]] = {"count": S("count"), "dns_prefix": S("dnsPrefix"), "fqdn": S("fqdn")}
    count: Optional[int] = field(default=None, metadata={'description': 'Number of masters (vms) in the container service cluster. Allowed values are 1, 3, and 5. The default value is 1.'})  # fmt: skip
    dns_prefix: Optional[str] = field(default=None, metadata={'description': 'Dns prefix to be used to create the fqdn for master.'})  # fmt: skip
    fqdn: Optional[str] = field(default=None, metadata={"description": "Fqdn for the master."})
@define(eq=False, slots=False)
class AzureContainerServiceWindowsProfile:
    """Nested model: Windows admin credentials of a container service."""

    kind: ClassVar[str] = "azure_container_service_windows_profile"
    mapping: ClassVar[Dict[str, Bender]] = {"admin_password": S("adminPassword"), "admin_username": S("adminUsername")}
    admin_password: Optional[str] = field(default=None, metadata={'description': 'The administrator password to use for windows vms.'})  # fmt: skip
    admin_username: Optional[str] = field(default=None, metadata={'description': 'The administrator username to use for windows vms.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureContainerServiceSshConfiguration:
    """Nested model: SSH configuration reduced to the list of public key data."""

    kind: ClassVar[str] = "azure_container_service_ssh_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {"public_keys": S("publicKeys", default=[]) >> ForallBend(S("keyData"))}
    public_keys: Optional[List[str]] = field(default=None, metadata={'description': 'The list of ssh public keys used to authenticate with linux-based vms.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureContainerServiceLinuxProfile:
    """Nested model: Linux admin username and SSH configuration."""

    kind: ClassVar[str] = "azure_container_service_linux_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "admin_username": S("adminUsername"),
        "ssh": S("ssh") >> Bend(AzureContainerServiceSshConfiguration.mapping),
    }
    admin_username: Optional[str] = field(default=None, metadata={'description': 'The administrator username to use for linux vms.'})  # fmt: skip
    ssh: Optional[AzureContainerServiceSshConfiguration] = field(default=None, metadata={'description': 'Ssh configuration for linux-based vms running on azure.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureContainerServiceVMDiagnostics:
    """Nested model: VM diagnostics settings (enabled flag + storage URI)."""

    kind: ClassVar[str] = "azure_container_service_vm_diagnostics"
    mapping: ClassVar[Dict[str, Bender]] = {"enabled": S("enabled"), "storage_uri": S("storageUri")}
    enabled: Optional[bool] = field(default=None, metadata={'description': 'Whether the vm diagnostic agent is provisioned on the vm.'})  # fmt: skip
    storage_uri: Optional[str] = field(default=None, metadata={'description': 'The uri of the storage account where diagnostics are stored.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureDedicatedHostGroupInstanceView:
    """Nested model: instance view of a dedicated host group, reduced to the
    names of the contained hosts."""

    kind: ClassVar[str] = "azure_dedicated_host_group_instance_view"
    mapping: ClassVar[Dict[str, Bender]] = {"hosts": S("hosts", default=[]) >> ForallBend(S("name"))}
    hosts: Optional[List[str]] = field(default=None, metadata={'description': 'List of instance view of the dedicated hosts under the dedicated host group.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureDedicatedHostGroup(AzureResource):
    """Azure dedicated host group, listed subscription-wide via the
    Microsoft.Compute/hostGroups REST API (api-version 2023-03-01)."""

    kind: ClassVar[str] = "azure_dedicated_host_group"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/hostGroups",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # this endpoint exposes no timestamps, so ctime/mtime/atime are always None
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "ultra_ssd_enabled": S("properties", "additionalCapabilities", "ultraSSDEnabled"),
        "hosts": S("properties") >> S("hosts", default=[]) >> ForallBend(S("id")),
        "host_group_instance_view": S("properties", "instanceView")
        >> Bend(AzureDedicatedHostGroupInstanceView.mapping),
        "platform_fault_domain_count": S("properties", "platformFaultDomainCount"),
        "support_automatic_placement": S("properties", "supportAutomaticPlacement"),
    }
    ultra_ssd_enabled: Optional[bool] = field(default=None, metadata={'description': 'Enables or disables a capability on the dedicated host group. Minimum api-version: 2022-03-01.'})  # fmt: skip
    hosts: Optional[List[str]] = field(default=None, metadata={'description': 'A list of references to all dedicated hosts in the dedicated host group.'})  # fmt: skip
    host_group_instance_view: Optional[AzureDedicatedHostGroupInstanceView] = field(
        default=None, metadata={"description": ""}
    )
    platform_fault_domain_count: Optional[int] = field(default=None, metadata={'description': 'Number of fault domains that the host group can span.'})  # fmt: skip
    support_automatic_placement: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether virtual machines or virtual machine scale sets can be placed automatically on the dedicated host group. Automatic placement means resources are allocated on dedicated hosts, that are chosen by azure, under the dedicated host group. The value is defaulted to false when not provided. Minimum api-version: 2020-06-01.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureDiskSku:
    """Nested model: disk SKU (name + tier)."""

    kind: ClassVar[str] = "azure_disk_sku"
    mapping: ClassVar[Dict[str, Bender]] = {"name": S("name"), "tier": S("tier")}
    name: Optional[str] = field(default=None, metadata={"description": "The sku name."})
    tier: Optional[str] = field(default=None, metadata={"description": "The sku tier."})
@define(eq=False, slots=False)
class AzureExtendedLocation:
    """Nested model: extended location of a resource (name + type)."""

    kind: ClassVar[str] = "azure_extended_location"
    mapping: ClassVar[Dict[str, Bender]] = {"name": S("name"), "type": S("type")}
    name: Optional[str] = field(default=None, metadata={"description": "The name of the extended location."})
    type: Optional[str] = field(default=None, metadata={"description": "The type of extendedlocation."})
@define(eq=False, slots=False)
class AzurePurchasePlan:
    """Nested model: marketplace purchase plan of an image."""

    kind: ClassVar[str] = "azure_purchase_plan"
    mapping: ClassVar[Dict[str, Bender]] = {
        "name": S("name"),
        "product": S("product"),
        "promotion_code": S("promotionCode"),
        "publisher": S("publisher"),
    }
    name: Optional[str] = field(default=None, metadata={"description": "The plan id."})
    product: Optional[str] = field(default=None, metadata={'description': 'Specifies the product of the image from the marketplace. This is the same value as offer under the imagereference element.'})  # fmt: skip
    promotion_code: Optional[str] = field(default=None, metadata={"description": "The offer promotion code."})
    publisher: Optional[str] = field(default=None, metadata={"description": "The publisher id."})
@define(eq=False, slots=False)
class AzureSupportedCapabilities:
    """Nested model: capabilities supported by an OS disk image."""

    kind: ClassVar[str] = "azure_supported_capabilities"
    mapping: ClassVar[Dict[str, Bender]] = {
        "accelerated_network": S("acceleratedNetwork"),
        "architecture": S("architecture"),
        "disk_controller_types": S("diskControllerTypes"),
    }
    accelerated_network: Optional[bool] = field(default=None, metadata={'description': 'True if the image from which the os disk is created supports accelerated networking.'})  # fmt: skip
    architecture: Optional[str] = field(default=None, metadata={'description': 'Cpu architecture supported by an os disk.'})  # fmt: skip
    disk_controller_types: Optional[str] = field(default=None, metadata={'description': 'The disk controllers that an os disk supports. If set it can be scsi or scsi, nvme or nvme, scsi.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureImageDiskReference:
    """Nested model: reference to the image (platform, community, or shared
    gallery) a disk is created from."""

    kind: ClassVar[str] = "azure_image_disk_reference"
    mapping: ClassVar[Dict[str, Bender]] = {
        "community_gallery_image_id": S("communityGalleryImageId"),
        "id": S("id"),
        "lun": S("lun"),
        "shared_gallery_image_id": S("sharedGalleryImageId"),
    }
    community_gallery_image_id: Optional[str] = field(default=None, metadata={'description': 'A relative uri containing a community azure compute gallery image reference.'})  # fmt: skip
    id: Optional[str] = field(default=None, metadata={'description': 'A relative uri containing either a platform image repository, user image, or azure compute gallery image reference.'})  # fmt: skip
    lun: Optional[int] = field(default=None, metadata={'description': 'If the disk is created from an image s data disk, this is an index that indicates which of the data disks in the image to use. For os disks, this field is null.'})  # fmt: skip
    shared_gallery_image_id: Optional[str] = field(default=None, metadata={'description': 'A relative uri containing a direct shared azure compute gallery image reference.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureCreationData:
    """Nested model: how a disk was created — source image/snapshot/blob and
    creation parameters. Which fields are populated depends on ``create_option``."""

    kind: ClassVar[str] = "azure_creation_data"
    mapping: ClassVar[Dict[str, Bender]] = {
        "create_option": S("createOption"),
        "gallery_image_reference": S("galleryImageReference") >> Bend(AzureImageDiskReference.mapping),
        "image_reference": S("imageReference") >> Bend(AzureImageDiskReference.mapping),
        "logical_sector_size": S("logicalSectorSize"),
        "performance_plus": S("performancePlus"),
        "security_data_uri": S("securityDataUri"),
        "source_resource_id": S("sourceResourceId"),
        "source_unique_id": S("sourceUniqueId"),
        "source_uri": S("sourceUri"),
        "storage_account_id": S("storageAccountId"),
        "upload_size_bytes": S("uploadSizeBytes"),
    }
    create_option: Optional[str] = field(default=None, metadata={'description': 'This enumerates the possible sources of a disk s creation.'})  # fmt: skip
    gallery_image_reference: Optional[AzureImageDiskReference] = field(default=None, metadata={'description': 'The source image used for creating the disk.'})  # fmt: skip
    image_reference: Optional[AzureImageDiskReference] = field(default=None, metadata={'description': 'The source image used for creating the disk.'})  # fmt: skip
    logical_sector_size: Optional[int] = field(default=None, metadata={'description': 'Logical sector size in bytes for ultra disks. Supported values are 512 ad 4096. 4096 is the default.'})  # fmt: skip
    performance_plus: Optional[bool] = field(default=None, metadata={'description': 'Set this flag to true to get a boost on the performance target of the disk deployed, see here on the respective performance target. This flag can only be set on disk creation time and cannot be disabled after enabled.'})  # fmt: skip
    security_data_uri: Optional[str] = field(default=None, metadata={'description': 'If createoption is importsecure, this is the uri of a blob to be imported into vm guest state.'})  # fmt: skip
    source_resource_id: Optional[str] = field(default=None, metadata={'description': 'If createoption is copy, this is the arm id of the source snapshot or disk.'})  # fmt: skip
    source_unique_id: Optional[str] = field(default=None, metadata={'description': 'If this field is set, this is the unique id identifying the source of this resource.'})  # fmt: skip
    source_uri: Optional[str] = field(default=None, metadata={'description': 'If createoption is import, this is the uri of a blob to be imported into a managed disk.'})  # fmt: skip
    storage_account_id: Optional[str] = field(default=None, metadata={'description': 'Required if createoption is import. The azure resource manager identifier of the storage account containing the blob to import as a disk.'})  # fmt: skip
    upload_size_bytes: Optional[int] = field(default=None, metadata={'description': 'If createoption is upload, this is the size of the contents of the upload including the vhd footer. This value should be between 20972032 (20 mib + 512 bytes for the vhd footer) and 35183298347520 bytes (32 tib + 512 bytes for the vhd footer).'})  # fmt: skip
@define(eq=False, slots=False)
class AzureKeyVaultAndSecretReference:
    """Nested model: key vault secret reference (secret URL + vault id)."""

    kind: ClassVar[str] = "azure_key_vault_and_secret_reference"
    mapping: ClassVar[Dict[str, Bender]] = {"secret_url": S("secretUrl"), "source_vault": S("sourceVault", "id")}
    secret_url: Optional[str] = field(default=None, metadata={'description': 'Url pointing to a key or secret in keyvault.'})  # fmt: skip
    source_vault: Optional[str] = field(default=None, metadata={'description': 'The vault id is an azure resource manager resource id in the form /subscriptions/{subscriptionid}/resourcegroups/{resourcegroupname}/providers/microsoft. Keyvault/vaults/{vaultname}.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureKeyVaultAndKeyReference:
    """Reference to a Key Vault key: the key URL plus the id of the vault holding it."""

    kind: ClassVar[str] = "azure_key_vault_and_key_reference"
    # Benders extracting this object's fields from the raw Azure API JSON.
    mapping: ClassVar[Dict[str, Bender]] = {"key_url": S("keyUrl"), "source_vault": S("sourceVault", "id")}
    key_url: Optional[str] = field(default=None, metadata={'description': 'Url pointing to a key or secret in keyvault.'}) # fmt: skip
    source_vault: Optional[str] = field(default=None, metadata={'description': 'The vault id is an azure resource manager resource id in the form /subscriptions/{subscriptionid}/resourcegroups/{resourcegroupname}/providers/microsoft. Keyvault/vaults/{vaultname}.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureEncryptionSettingsElement:
    """Encryption settings for one disk volume: the disk encryption key and an optional key
    encryption key (KEK) used to unwrap it."""

    kind: ClassVar[str] = "azure_encryption_settings_element"
    mapping: ClassVar[Dict[str, Bender]] = {
        "disk_encryption_key": S("diskEncryptionKey") >> Bend(AzureKeyVaultAndSecretReference.mapping),
        "key_encryption_key": S("keyEncryptionKey") >> Bend(AzureKeyVaultAndKeyReference.mapping),
    }
    disk_encryption_key: Optional[AzureKeyVaultAndSecretReference] = field(default=None, metadata={'description': 'Key vault secret url and vault id of the encryption key.'}) # fmt: skip
    key_encryption_key: Optional[AzureKeyVaultAndKeyReference] = field(default=None, metadata={'description': 'Key vault key url and vault id of kek, kek is optional and when provided is used to unwrap the encryptionkey.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureEncryptionSettingsCollection:
    """Collection of per-volume encryption settings for a disk or snapshot, plus the on/off flag
    and settings version."""

    kind: ClassVar[str] = "azure_encryption_settings_collection"
    mapping: ClassVar[Dict[str, Bender]] = {
        "enabled": S("enabled"),
        "encryption_settings": S("encryptionSettings") >> ForallBend(AzureEncryptionSettingsElement.mapping),
        "encryption_settings_version": S("encryptionSettingsVersion"),
    }
    enabled: Optional[bool] = field(default=None, metadata={'description': 'Set this flag to true and provide diskencryptionkey and optional keyencryptionkey to enable encryption. Set this flag to false and remove diskencryptionkey and keyencryptionkey to disable encryption. If encryptionsettings is null in the request object, the existing settings remain unchanged.'}) # fmt: skip
    encryption_settings: Optional[List[AzureEncryptionSettingsElement]] = field(default=None, metadata={'description': 'A collection of encryption settings, one for each disk volume.'}) # fmt: skip
    encryption_settings_version: Optional[str] = field(default=None, metadata={'description': 'Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. 1. 0 corresponds to azure disk encryption with aad app. 1. 1 corresponds to azure disk encryption.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureEncryption:
    """Encryption-at-rest settings for a disk or snapshot: disk encryption set id and key type."""

    kind: ClassVar[str] = "azure_encryption"
    mapping: ClassVar[Dict[str, Bender]] = {"disk_encryption_set_id": S("diskEncryptionSetId"), "type": S("type")}
    disk_encryption_set_id: Optional[str] = field(default=None, metadata={'description': 'Resourceid of the disk encryption set to use for enabling encryption at rest.'}) # fmt: skip
    type: Optional[str] = field(default=None, metadata={'description': 'The type of key used to encrypt the data of the disk.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureDiskSecurityProfile:
    """Security-related information for a disk: confidential-VM encryption set id and security type."""

    kind: ClassVar[str] = "azure_disk_security_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "secure_vm_disk_encryption_set_id": S("secureVMDiskEncryptionSetId"),
        "security_type": S("securityType"),
    }
    secure_vm_disk_encryption_set_id: Optional[str] = field(default=None, metadata={'description': 'Resourceid of the disk encryption set associated to confidential vm supported disk encrypted with customer managed key.'}) # fmt: skip
    security_type: Optional[str] = field(default=None, metadata={'description': 'Specifies the securitytype of the vm. Applicable for os disks only.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureDisk(AzureResource):
    """An Azure managed disk, collected via the compute `disks` list API (see `api_spec`).

    `mapping` translates the raw API JSON (mostly nested under "properties") into this class's
    fields; unmapped base timestamps (mtime/atime) are explicitly set to None via K(None).
    """

    kind: ClassVar[str] = "azure_disk"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-01-02",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        "ctime": S("properties", "timeCreated"),
        "mtime": K(None),
        "atime": K(None),
        "bursting_enabled": S("properties", "burstingEnabled"),
        "bursting_enabled_time": S("properties", "burstingEnabledTime"),
        "completion_percent": S("properties", "completionPercent"),
        "creation_data": S("properties", "creationData") >> Bend(AzureCreationData.mapping),
        "data_access_auth_mode": S("properties", "dataAccessAuthMode"),
        "disk_access_id": S("properties", "diskAccessId"),
        "disk_iops_read_only": S("properties", "diskIOPSReadOnly"),
        "disk_iops_read_write": S("properties", "diskIOPSReadWrite"),
        "disk_m_bps_read_only": S("properties", "diskMBpsReadOnly"),
        "disk_m_bps_read_write": S("properties", "diskMBpsReadWrite"),
        "disk_size_bytes": S("properties", "diskSizeBytes"),
        "disk_size_gb": S("properties", "diskSizeGB"),
        "disk_state": S("properties", "diskState"),
        "disk_encryption": S("properties", "encryption") >> Bend(AzureEncryption.mapping),
        "encryption_settings_collection": S("properties", "encryptionSettingsCollection")
        >> Bend(AzureEncryptionSettingsCollection.mapping),
        "extended_location": S("extendedLocation") >> Bend(AzureExtendedLocation.mapping),
        "hyper_v_generation": S("properties", "hyperVGeneration"),
        "managed_by": S("managedBy"),
        "managed_by_extended": S("managedByExtended"),
        "max_shares": S("properties", "maxShares"),
        "network_access_policy": S("properties", "networkAccessPolicy"),
        "optimized_for_frequent_attach": S("properties", "optimizedForFrequentAttach"),
        "os_type": S("properties", "osType"),
        "property_updates_in_progress": S("properties", "propertyUpdatesInProgress", "targetTier"),
        "provisioning_state": S("properties", "provisioningState"),
        "public_network_access": S("properties", "publicNetworkAccess"),
        "purchase_plan": S("properties", "purchasePlan") >> Bend(AzurePurchasePlan.mapping),
        "disk_security_profile": S("properties", "securityProfile") >> Bend(AzureDiskSecurityProfile.mapping),
        # shareInfo is a list of {"vmUri": ...} objects; extract only the vmUri of each entry.
        "share_info": S("properties") >> S("shareInfo", default=[]) >> ForallBend(S("vmUri")),
        "disk_sku": S("sku") >> Bend(AzureDiskSku.mapping),
        "supported_capabilities": S("properties", "supportedCapabilities") >> Bend(AzureSupportedCapabilities.mapping),
        "supports_hibernation": S("properties", "supportsHibernation"),
        "tier": S("properties", "tier"),
        "time_created": S("properties", "timeCreated"),
        "unique_id": S("properties", "uniqueId"),
    }
    bursting_enabled: Optional[bool] = field(default=None, metadata={'description': 'Set to true to enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default. Does not apply to ultra disks.'}) # fmt: skip
    bursting_enabled_time: Optional[datetime] = field(default=None, metadata={'description': 'Latest time when bursting was last enabled on a disk.'}) # fmt: skip
    completion_percent: Optional[float] = field(default=None, metadata={'description': 'Percentage complete for the background copy when a resource is created via the copystart operation.'}) # fmt: skip
    creation_data: Optional[AzureCreationData] = field(default=None, metadata={'description': 'Data used when creating a disk.'}) # fmt: skip
    data_access_auth_mode: Optional[str] = field(default=None, metadata={'description': 'Additional authentication requirements when exporting or uploading to a disk or snapshot.'}) # fmt: skip
    disk_access_id: Optional[str] = field(default=None, metadata={'description': 'Arm id of the diskaccess resource for using private endpoints on disks.'}) # fmt: skip
    disk_iops_read_only: Optional[int] = field(default=None, metadata={'description': 'The total number of iops that will be allowed across all vms mounting the shared disk as readonly. One operation can transfer between 4k and 256k bytes.'}) # fmt: skip
    disk_iops_read_write: Optional[int] = field(default=None, metadata={'description': 'The number of iops allowed for this disk; only settable for ultrassd disks. One operation can transfer between 4k and 256k bytes.'}) # fmt: skip
    disk_m_bps_read_only: Optional[int] = field(default=None, metadata={'description': 'The total throughput (mbps) that will be allowed across all vms mounting the shared disk as readonly. Mbps means millions of bytes per second - mb here uses the iso notation, of powers of 10.'}) # fmt: skip
    disk_m_bps_read_write: Optional[int] = field(default=None, metadata={'description': 'The bandwidth allowed for this disk; only settable for ultrassd disks. Mbps means millions of bytes per second - mb here uses the iso notation, of powers of 10.'}) # fmt: skip
    disk_size_bytes: Optional[int] = field(default=None, metadata={'description': 'The size of the disk in bytes. This field is read only.'}) # fmt: skip
    disk_size_gb: Optional[int] = field(default=None, metadata={'description': 'If creationdata. Createoption is empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running vm, and can only increase the disk s size.'}) # fmt: skip
    disk_state: Optional[str] = field(default=None, metadata={'description': 'This enumerates the possible state of the disk.'}) # fmt: skip
    disk_encryption: Optional[AzureEncryption] = field(default=None, metadata={'description': 'Encryption at rest settings for disk or snapshot.'}) # fmt: skip
    encryption_settings_collection: Optional[AzureEncryptionSettingsCollection] = field(default=None, metadata={'description': 'Encryption settings for disk or snapshot.'}) # fmt: skip
    extended_location: Optional[AzureExtendedLocation] = field(default=None, metadata={'description': 'The complex type of the extended location.'}) # fmt: skip
    hyper_v_generation: Optional[str] = field(default=None, metadata={'description': 'The hypervisor generation of the virtual machine. Applicable to os disks only.'}) # fmt: skip
    managed_by: Optional[str] = field(default=None, metadata={'description': 'A relative uri containing the id of the vm that has the disk attached.'}) # fmt: skip
    managed_by_extended: Optional[List[str]] = field(default=None, metadata={'description': 'List of relative uris containing the ids of the vms that have the disk attached. Maxshares should be set to a value greater than one for disks to allow attaching them to multiple vms.'}) # fmt: skip
    max_shares: Optional[int] = field(default=None, metadata={'description': 'The maximum number of vms that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple vms at the same time.'}) # fmt: skip
    network_access_policy: Optional[str] = field(default=None, metadata={'description': 'Policy for accessing the disk via network.'}) # fmt: skip
    optimized_for_frequent_attach: Optional[bool] = field(default=None, metadata={'description': 'Setting this property to true improves reliability and performance of data disks that are frequently (more than 5 times a day) by detached from one virtual machine and attached to another. This property should not be set for disks that are not detached and attached frequently as it causes the disks to not align with the fault domain of the virtual machine.'}) # fmt: skip
    os_type: Optional[str] = field(default=None, metadata={"description": "The operating system type."})
    property_updates_in_progress: Optional[str] = field(default=None, metadata={'description': 'Properties of the disk for which update is pending.'}) # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={"description": "The disk provisioning state."})
    public_network_access: Optional[str] = field(default=None, metadata={'description': 'Policy for controlling export on the disk.'}) # fmt: skip
    purchase_plan: Optional[AzurePurchasePlan] = field(default=None, metadata={'description': 'Used for establishing the purchase context of any 3rd party artifact through marketplace.'}) # fmt: skip
    disk_security_profile: Optional[AzureDiskSecurityProfile] = field(default=None, metadata={'description': 'Contains the security related information for the resource.'}) # fmt: skip
    share_info: Optional[List[str]] = field(default=None, metadata={'description': 'Details of the list of all vms that have the disk attached. Maxshares should be set to a value greater than one for disks to allow attaching them to multiple vms.'}) # fmt: skip
    disk_sku: Optional[AzureDiskSku] = field(default=None, metadata={'description': 'The disks sku name. Can be standard_lrs, premium_lrs, standardssd_lrs, ultrassd_lrs, premium_zrs, standardssd_zrs, or premiumv2_lrs.'}) # fmt: skip
    supported_capabilities: Optional[AzureSupportedCapabilities] = field(default=None, metadata={'description': 'List of supported capabilities persisted on the disk resource for vm use.'}) # fmt: skip
    supports_hibernation: Optional[bool] = field(default=None, metadata={'description': 'Indicates the os on a disk supports hibernation.'}) # fmt: skip
    tier: Optional[str] = field(default=None, metadata={'description': 'Performance tier of the disk (e. G, p4, s10) as described here: https://azure. Microsoft. Com/en-us/pricing/details/managed-disks/. Does not apply to ultra disks.'}) # fmt: skip
    time_created: Optional[datetime] = field(default=None, metadata={'description': 'The time when the disk was created.'}) # fmt: skip
    unique_id: Optional[str] = field(default=None, metadata={"description": "Unique guid identifying the resource."})
@define(eq=False, slots=False)
class AzurePrivateLinkServiceConnectionState:
    """State of a private-link service connection between consumer and provider."""

    kind: ClassVar[str] = "azure_private_link_service_connection_state"
    mapping: ClassVar[Dict[str, Bender]] = {
        "actions_required": S("actionsRequired"),
        "description": S("description"),
        "status": S("status"),
    }
    actions_required: Optional[str] = field(default=None, metadata={'description': 'A message indicating if changes on the service provider require any updates on the consumer.'}) # fmt: skip
    description: Optional[str] = field(default=None, metadata={'description': 'The reason for approval/rejection of the connection.'}) # fmt: skip
    status: Optional[str] = field(default=None, metadata={"description": "The private endpoint connection status."})
@define(eq=False, slots=False)
class AzurePrivateEndpointConnection:
    """A private endpoint connection attached to a resource (id, endpoint, connection state)."""

    kind: ClassVar[str] = "azure_private_endpoint_connection"
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "name": S("name"),
        "private_endpoint": S("properties", "privateEndpoint", "id"),
        "private_link_service_connection_state": S("properties", "privateLinkServiceConnectionState")
        >> Bend(AzurePrivateLinkServiceConnectionState.mapping),
        "provisioning_state": S("properties", "provisioningState"),
        "type": S("type"),
    }
    id: Optional[str] = field(default=None, metadata={"description": "Private endpoint connection id."})
    name: Optional[str] = field(default=None, metadata={"description": "Private endpoint connection name."})
    private_endpoint: Optional[str] = field(default=None, metadata={"description": "The private endpoint resource."})
    private_link_service_connection_state: Optional[AzurePrivateLinkServiceConnectionState] = field(default=None, metadata={'description': 'A collection of information about the state of the connection between service consumer and provider.'}) # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The current provisioning state.'}) # fmt: skip
    type: Optional[str] = field(default=None, metadata={"description": "Private endpoint connection type."})
@define(eq=False, slots=False)
class AzureDiskAccess(AzureResource):
    """A disk-access resource, collected via the compute `diskAccesses` list API (see `api_spec`).

    `mapping` translates the raw API JSON (camelCase keys, mostly nested under "properties")
    into this class's fields.
    """

    kind: ClassVar[str] = "azure_disk_access"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-01-02",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # Fix: read the creation time from the raw payload ("properties"."timeCreated", as the
        # "time_created" bender below and every sibling resource do). The previous
        # S("time_created") looked up a key that does not exist in the API response (that is the
        # snake_case Python field name, not a JSON key), so ctime was never populated.
        "ctime": S("properties", "timeCreated"),
        "mtime": K(None),
        "atime": K(None),
        "extended_location": S("extendedLocation") >> Bend(AzureExtendedLocation.mapping),
        "private_endpoint_connections": S("properties", "privateEndpointConnections")
        >> ForallBend(AzurePrivateEndpointConnection.mapping),
        "provisioning_state": S("properties", "provisioningState"),
        "time_created": S("properties", "timeCreated"),
    }
    extended_location: Optional[AzureExtendedLocation] = field(default=None, metadata={'description': 'The complex type of the extended location.'}) # fmt: skip
    private_endpoint_connections: Optional[List[AzurePrivateEndpointConnection]] = field(default=None, metadata={'description': 'A readonly collection of private endpoint connections created on the disk. Currently only one endpoint connection is supported.'}) # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The disk access resource provisioning state.'}) # fmt: skip
    time_created: Optional[datetime] = field(default=None, metadata={'description': 'The time when the disk access was created.'}) # fmt: skip
@define(eq=False, slots=False)
class AzurePrincipalidClientid:
    """Client id / principal id pair of a user-assigned identity."""

    kind: ClassVar[str] = "azure_principalid_clientid"
    mapping: ClassVar[Dict[str, Bender]] = {"client_id": S("clientId"), "principal_id": S("principalId")}
    client_id: Optional[str] = field(default=None, metadata={'description': 'The client id of user assigned identity.'}) # fmt: skip
    principal_id: Optional[str] = field(default=None, metadata={'description': 'The principal id of user assigned identity.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureEncryptionSetIdentity:
    """Managed identity of a disk encryption set (system- or user-assigned)."""

    kind: ClassVar[str] = "azure_encryption_set_identity"
    mapping: ClassVar[Dict[str, Bender]] = {
        "principal_id": S("principalId"),
        "tenant_id": S("tenantId"),
        "type": S("type"),
        "user_assigned_identities": S("userAssignedIdentities"),
    }
    principal_id: Optional[str] = field(default=None, metadata={'description': 'The object id of the managed identity resource. This will be sent to the rp from arm via the x-ms-identity-principal-id header in the put request if the resource has a systemassigned(implicit) identity.'}) # fmt: skip
    tenant_id: Optional[str] = field(default=None, metadata={'description': 'The tenant id of the managed identity resource. This will be sent to the rp from arm via the x-ms-client-tenant-id header in the put request if the resource has a systemassigned(implicit) identity.'}) # fmt: skip
    type: Optional[str] = field(default=None, metadata={'description': 'The type of managed identity used by the diskencryptionset. Only systemassigned is supported for new creations. Disk encryption sets can be updated with identity type none during migration of subscription to a new azure active directory tenant; it will cause the encrypted resources to lose access to the keys.'}) # fmt: skip
    user_assigned_identities: Optional[Dict[str, AzurePrincipalidClientid]] = field(default=None, metadata={'description': 'The list of user identities associated with the virtual machine. The user identity dictionary key references will be arm resource ids in the form: /subscriptions/{subscriptionid}/resourcegroups/{resourcegroupname}/providers/microsoft. Managedidentity/userassignedidentities/{identityname}.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureKeyForDiskEncryptionSet:
    """Fully versioned Key Vault key used by a disk encryption set (key URL + source vault id)."""

    kind: ClassVar[str] = "azure_key_for_disk_encryption_set"
    mapping: ClassVar[Dict[str, Bender]] = {"key_url": S("keyUrl"), "source_vault": S("sourceVault", "id")}
    key_url: Optional[str] = field(default=None, metadata={'description': 'Fully versioned key url pointing to a key in keyvault. Version segment of the url is required regardless of rotationtolatestkeyversionenabled value.'}) # fmt: skip
    source_vault: Optional[str] = field(default=None, metadata={'description': 'The vault id is an azure resource manager resource id in the form /subscriptions/{subscriptionid}/resourcegroups/{resourcegroupname}/providers/microsoft. Keyvault/vaults/{vaultname}.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureApiErrorBase:
    """Base API error detail: code, message, and target."""

    kind: ClassVar[str] = "azure_api_error_base"
    mapping: ClassVar[Dict[str, Bender]] = {"code": S("code"), "message": S("message"), "target": S("target")}
    code: Optional[str] = field(default=None, metadata={"description": "The error code."})
    message: Optional[str] = field(default=None, metadata={"description": "The error message."})
    target: Optional[str] = field(default=None, metadata={"description": "The target of the particular error."})
@define(eq=False, slots=False)
class AzureInnerError:
    """Inner error details of an API error: exception type and error detail/dump."""

    kind: ClassVar[str] = "azure_inner_error"
    mapping: ClassVar[Dict[str, Bender]] = {"errordetail": S("errordetail"), "exceptiontype": S("exceptiontype")}
    errordetail: Optional[str] = field(default=None, metadata={'description': 'The internal error message or exception dump.'}) # fmt: skip
    exceptiontype: Optional[str] = field(default=None, metadata={"description": "The exception type."})
@define(eq=False, slots=False)
class AzureApiError:
    """Top-level API error: code/message/target plus nested details and inner error."""

    kind: ClassVar[str] = "azure_api_error"
    mapping: ClassVar[Dict[str, Bender]] = {
        "code": S("code"),
        "details": S("details") >> ForallBend(AzureApiErrorBase.mapping),
        "innererror": S("innererror") >> Bend(AzureInnerError.mapping),
        "message": S("message"),
        "target": S("target"),
    }
    code: Optional[str] = field(default=None, metadata={"description": "The error code."})
    details: Optional[List[AzureApiErrorBase]] = field(default=None, metadata={"description": "The api error details."})
    innererror: Optional[AzureInnerError] = field(default=None, metadata={"description": "Inner error details."})
    message: Optional[str] = field(default=None, metadata={"description": "The error message."})
    target: Optional[str] = field(default=None, metadata={"description": "The target of the particular error."})
@define(eq=False, slots=False)
class AzureDiskEncryptionSet(AzureResource):
    """A disk encryption set, collected via the compute `diskEncryptionSets` list API (see `api_spec`).

    `mapping` translates the raw API JSON (mostly nested under "properties") into this class's
    fields; base timestamps (ctime/mtime/atime) are not provided by this API and are set to None.
    """

    kind: ClassVar[str] = "azure_disk_encryption_set"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-01-02",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "active_key": S("properties", "activeKey") >> Bend(AzureKeyForDiskEncryptionSet.mapping),
        "auto_key_rotation_error": S("properties", "autoKeyRotationError") >> Bend(AzureApiError.mapping),
        "encryption_type": S("properties", "encryptionType"),
        "federated_client_id": S("properties", "federatedClientId"),
        "encryption_set_identity": S("identity") >> Bend(AzureEncryptionSetIdentity.mapping),
        "last_key_rotation_timestamp": S("properties", "lastKeyRotationTimestamp"),
        "previous_keys": S("properties", "previousKeys") >> ForallBend(AzureKeyForDiskEncryptionSet.mapping),
        "provisioning_state": S("properties", "provisioningState"),
        "rotation_to_latest_key_version_enabled": S("properties", "rotationToLatestKeyVersionEnabled"),
    }
    active_key: Optional[AzureKeyForDiskEncryptionSet] = field(default=None, metadata={'description': 'Key vault key url to be used for server side encryption of managed disks and snapshots.'}) # fmt: skip
    auto_key_rotation_error: Optional[AzureApiError] = field(default=None, metadata={"description": "Api error."})
    encryption_type: Optional[str] = field(default=None, metadata={'description': 'The type of key used to encrypt the data of the disk.'}) # fmt: skip
    federated_client_id: Optional[str] = field(default=None, metadata={'description': 'Multi-tenant application client id to access key vault in a different tenant. Setting the value to none will clear the property.'}) # fmt: skip
    encryption_set_identity: Optional[AzureEncryptionSetIdentity] = field(default=None, metadata={'description': 'The managed identity for the disk encryption set. It should be given permission on the key vault before it can be used to encrypt disks.'}) # fmt: skip
    last_key_rotation_timestamp: Optional[datetime] = field(default=None, metadata={'description': 'The time when the active key of this disk encryption set was updated.'}) # fmt: skip
    previous_keys: Optional[List[AzureKeyForDiskEncryptionSet]] = field(default=None, metadata={'description': 'A readonly collection of key vault keys previously used by this disk encryption set while a key rotation is in progress. It will be empty if there is no ongoing key rotation.'}) # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The disk encryption set provisioning state.'}) # fmt: skip
    rotation_to_latest_key_version_enabled: Optional[bool] = field(default=None, metadata={'description': 'Set this flag to true to enable auto-updating of this disk encryption set to the latest key version.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureSharingProfileGroup:
    """A group a gallery is shared to: a list of subscription/tenant ids plus the group type."""

    kind: ClassVar[str] = "azure_sharing_profile_group"
    mapping: ClassVar[Dict[str, Bender]] = {"ids": S("ids"), "type": S("type")}
    ids: Optional[List[str]] = field(default=None, metadata={'description': 'A list of subscription/tenant ids the gallery is aimed to be shared to.'}) # fmt: skip
    type: Optional[str] = field(default=None, metadata={'description': 'This property allows you to specify the type of sharing group. Possible values are: **subscriptions** **aadtenants**.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureCommunityGalleryInfo:
    """Community-gallery sharing info: enablement flag, EULA, public names, and publisher contact."""

    kind: ClassVar[str] = "azure_community_gallery_info"
    mapping: ClassVar[Dict[str, Bender]] = {
        "community_gallery_enabled": S("communityGalleryEnabled"),
        "eula": S("eula"),
        "public_name_prefix": S("publicNamePrefix"),
        "public_names": S("publicNames"),
        "publisher_contact": S("publisherContact"),
        "publisher_uri": S("publisherUri"),
    }
    community_gallery_enabled: Optional[bool] = field(default=None, metadata={'description': 'Contains info about whether community gallery sharing is enabled.'}) # fmt: skip
    eula: Optional[str] = field(default=None, metadata={'description': 'End-user license agreement for community gallery image.'}) # fmt: skip
    public_name_prefix: Optional[str] = field(default=None, metadata={'description': 'The prefix of the gallery name that will be displayed publicly. Visible to all users.'}) # fmt: skip
    public_names: Optional[List[str]] = field(
        default=None, metadata={"description": "Community gallery public name list."}
    )
    publisher_contact: Optional[str] = field(default=None, metadata={'description': 'Community gallery publisher support email. The email address of the publisher. Visible to all users.'}) # fmt: skip
    publisher_uri: Optional[str] = field(default=None, metadata={'description': 'The link to the publisher website. Visible to all users.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureSharingProfile:
    """Gallery sharing profile: community info, target groups, and sharing permission."""

    kind: ClassVar[str] = "azure_sharing_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "community_gallery_info": S("communityGalleryInfo") >> Bend(AzureCommunityGalleryInfo.mapping),
        "groups": S("groups") >> ForallBend(AzureSharingProfileGroup.mapping),
        "permissions": S("permissions"),
    }
    community_gallery_info: Optional[AzureCommunityGalleryInfo] = field(default=None, metadata={'description': 'Information of community gallery if current gallery is shared to community.'}) # fmt: skip
    groups: Optional[List[AzureSharingProfileGroup]] = field(default=None, metadata={'description': 'A list of sharing profile groups.'}) # fmt: skip
    permissions: Optional[str] = field(default=None, metadata={'description': 'This property allows you to specify the permission of sharing gallery. Possible values are: **private** **groups** **community**.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureRegionalSharingStatus:
    """Per-region gallery sharing status (region name, state, failure details)."""

    kind: ClassVar[str] = "azure_regional_sharing_status"
    mapping: ClassVar[Dict[str, Bender]] = {"details": S("details"), "region": S("region"), "state": S("state")}
    details: Optional[str] = field(default=None, metadata={'description': 'Details of gallery regional sharing failure.'}) # fmt: skip
    region: Optional[str] = field(default=None, metadata={"description": "Region name."})
    state: Optional[str] = field(default=None, metadata={'description': 'The sharing state of the gallery, which only appears in the response.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureSharingStatus:
    """Aggregated gallery sharing status plus the per-region summary."""

    kind: ClassVar[str] = "azure_sharing_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "aggregated_state": S("aggregatedState"),
        "summary": S("summary") >> ForallBend(AzureRegionalSharingStatus.mapping),
    }
    aggregated_state: Optional[str] = field(default=None, metadata={'description': 'The sharing state of the gallery, which only appears in the response.'}) # fmt: skip
    summary: Optional[List[AzureRegionalSharingStatus]] = field(default=None, metadata={'description': 'Summary of all regional sharing status.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureGallery(AzureResource):
    """A shared image gallery, collected via the compute `galleries` list API (see `api_spec`).

    `mapping` translates the raw API JSON (mostly nested under "properties") into this class's
    fields; base timestamps (ctime/mtime/atime) are not provided by this API and are set to None.
    """

    kind: ClassVar[str] = "azure_gallery"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2022-03-03",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/galleries",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "description": S("properties", "description"),
        "identifier": S("properties", "identifier", "uniqueName"),
        "provisioning_state": S("properties", "provisioningState"),
        "sharing_profile": S("properties", "sharingProfile") >> Bend(AzureSharingProfile.mapping),
        "sharing_status": S("properties", "sharingStatus") >> Bend(AzureSharingStatus.mapping),
        "soft_delete_policy": S("properties", "softDeletePolicy", "isSoftDeleteEnabled"),
    }
    description: Optional[str] = field(default=None, metadata={'description': 'The description of this shared image gallery resource. This property is updatable.'}) # fmt: skip
    identifier: Optional[str] = field(default=None, metadata={"description": "Describes the gallery unique name."})
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The provisioning state, which only appears in the response.'}) # fmt: skip
    sharing_profile: Optional[AzureSharingProfile] = field(default=None, metadata={'description': 'Profile for gallery sharing to subscription or tenant.'}) # fmt: skip
    sharing_status: Optional[AzureSharingStatus] = field(default=None, metadata={'description': 'Sharing status of current gallery.'}) # fmt: skip
    soft_delete_policy: Optional[bool] = field(default=None, metadata={'description': 'Contains information about the soft deletion policy of the gallery.'}) # fmt: skip
# Fix: the @define decorator was missing here (every sibling class in this file carries it).
# Without it, attrs never processes the class, so `id` would remain a raw attrs field sentinel
# at class level instead of a real attribute, and attrs subclasses (e.g.
# AzureDiskEncryptionSetParameters) would not inherit it as a field.
@define(eq=False, slots=False)
class AzureSubResource:
    """Minimal Azure sub-resource: just the ARM resource id."""

    kind: ClassVar[str] = "azure_sub_resource"
    mapping: ClassVar[Dict[str, Bender]] = {"id": S("id")}
    id: Optional[str] = field(default=None, metadata={"description": "Resource id."})
@define(eq=False, slots=False)
class AzureDiskEncryptionSetParameters(AzureSubResource):
    """Customer-managed disk encryption set parameter; adds no fields beyond AzureSubResource."""

    kind: ClassVar[str] = "azure_disk_encryption_set_parameters"
    # Fix: merge the parent mapping instead of an empty dict. Bend(AzureDiskEncryptionSetParameters.mapping)
    # (used by AzureImageDisk) only consults this dict, so an empty mapping silently dropped the
    # inherited "id" field during deserialization.
    mapping: ClassVar[Dict[str, Bender]] = {**AzureSubResource.mapping}
@define(eq=False, slots=False)
class AzureImageDisk:
    """A disk belonging to an image: source (blob/snapshot/managed disk), size, caching, and
    encryption settings. Base class for OS- and data-disk variants."""

    kind: ClassVar[str] = "azure_image_disk"
    mapping: ClassVar[Dict[str, Bender]] = {
        "blob_uri": S("blobUri"),
        "caching": S("caching"),
        "disk_encryption_set": S("diskEncryptionSet") >> Bend(AzureDiskEncryptionSetParameters.mapping),
        "disk_size_gb": S("diskSizeGB"),
        "managed_disk": S("managedDisk", "id"),
        "snapshot": S("snapshot", "id"),
        "storage_account_type": S("storageAccountType"),
    }
    blob_uri: Optional[str] = field(default=None, metadata={"description": "The virtual hard disk."})
    caching: Optional[str] = field(default=None, metadata={'description': 'Specifies the caching requirements. Possible values are: **none,** **readonly,** **readwrite. ** the default values are: **none for standard storage. Readonly for premium storage. **.'}) # fmt: skip
    disk_encryption_set: Optional[AzureDiskEncryptionSetParameters] = field(default=None, metadata={'description': 'Describes the parameter of customer managed disk encryption set resource id that can be specified for disk. **note:** the disk encryption set resource id can only be specified for managed disk. Please refer https://aka. Ms/mdssewithcmkoverview for more details.'}) # fmt: skip
    disk_size_gb: Optional[int] = field(default=None, metadata={'description': 'Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image. This value cannot be larger than 1023 gb.'}) # fmt: skip
    managed_disk: Optional[str] = field(default=None, metadata={"description": ""})
    snapshot: Optional[str] = field(default=None, metadata={"description": ""})
    storage_account_type: Optional[str] = field(default=None, metadata={'description': 'Specifies the storage account type for the managed disk. Managed os disk storage account type can only be set when you create the scale set. Note: ultrassd_lrs can only be used with data disks. It cannot be used with os disk. Standard_lrs uses standard hdd. Standardssd_lrs uses standard ssd. Premium_lrs uses premium ssd. Ultrassd_lrs uses ultra disk. Premium_zrs uses premium ssd zone redundant storage. Standardssd_zrs uses standard ssd zone redundant storage. For more information regarding disks supported for windows virtual machines, refer to https://docs. Microsoft. Com/azure/virtual-machines/windows/disks-types and, for linux virtual machines, refer to https://docs. Microsoft. Com/azure/virtual-machines/linux/disks-types.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureImageOSDisk(AzureImageDisk):
    """OS disk of a managed image; extends the shared image-disk fields with OS state/type."""

    kind: ClassVar[str] = "azure_image_os_disk"
    mapping: ClassVar[Dict[str, Bender]] = {"os_state": S("osState"), "os_type": S("osType")}
    os_state: Optional[str] = field(default=None, metadata={'description': 'The os state. For managed images, use generalized.'})  # fmt: skip
    os_type: Optional[str] = field(default=None, metadata={'description': 'This property allows you to specify the type of the os that is included in the disk if creating a vm from a custom image. Possible values are: **windows,** **linux. **.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureImageStorageProfile:
    """Storage profile of a managed image: OS disk, data-disk LUNs, and zone resiliency."""

    kind: ClassVar[str] = "azure_image_storage_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        # only the LUN of each data disk is kept, not the full disk object
        "data_disks": S("dataDisks", default=[]) >> ForallBend(S("lun")),
        "os_disk": S("osDisk") >> Bend(AzureImageOSDisk.mapping),
        "zone_resilient": S("zoneResilient"),
    }
    data_disks: Optional[List[int]] = field(default=None, metadata={'description': 'Specifies the parameters that are used to add a data disk to a virtual machine. For more information about disks, see [about disks and vhds for azure virtual machines](https://docs. Microsoft. Com/azure/virtual-machines/managed-disks-overview).'})  # fmt: skip
    os_disk: Optional[AzureImageOSDisk] = field(default=None, metadata={'description': 'Describes an operating system disk.'})  # fmt: skip
    zone_resilient: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether an image is zone resilient or not. Default is false. Zone resilient images can be created only in regions that provide zone redundant storage (zrs).'})  # fmt: skip
@define(eq=False, slots=False)
class AzureImage(AzureResource):
    """A managed VM image, collected via the Microsoft.Compute/images list API (subscription scope)."""

    kind: ClassVar[str] = "azure_image"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # the API response carries no timestamps, so these base-resource fields stay None
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "extended_location": S("extendedLocation") >> Bend(AzureExtendedLocation.mapping),
        "hyper_v_generation": S("properties", "hyperVGeneration"),
        "provisioning_state": S("properties", "provisioningState"),
        # flatten the source VM sub-resource to its id string
        "source_virtual_machine": S("properties", "sourceVirtualMachine", "id"),
        "storage_profile": S("properties", "storageProfile") >> Bend(AzureImageStorageProfile.mapping),
    }
    extended_location: Optional[AzureExtendedLocation] = field(default=None, metadata={'description': 'The complex type of the extended location.'})  # fmt: skip
    hyper_v_generation: Optional[str] = field(default=None, metadata={'description': 'Specifies the hypervgeneration type.'})  # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={"description": "The provisioning state."})
    source_virtual_machine: Optional[str] = field(default=None, metadata={"description": ""})
    storage_profile: Optional[AzureImageStorageProfile] = field(default=None, metadata={'description': 'Describes a storage profile.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureSubResourceWithColocationStatus(AzureSubResource):
    """A sub-resource reference enriched with its colocation status (used by proximity placement groups)."""

    kind: ClassVar[str] = "azure_sub_resource_with_colocation_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "colocation_status": S("colocationStatus") >> Bend(AzureInstanceViewStatus.mapping)
    }
    colocation_status: Optional[AzureInstanceViewStatus] = field(default=None, metadata={'description': 'Instance view status.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVmSizes:
    """Wrapper for a list of VM size names (the `intent` of a proximity placement group)."""

    kind: ClassVar[str] = "azure_vm_sizes"
    mapping: ClassVar[Dict[str, Bender]] = {"vm_sizes": S("vmSizes")}
    vm_sizes: Optional[List[str]] = field(default=None, metadata={'description': 'Specifies possible sizes of virtual machines that can be created in the proximity placement group.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureProximityPlacementGroup(AzureResource):
    """A proximity placement group, collected via the Microsoft.Compute/proximityPlacementGroups list API."""

    kind: ClassVar[str] = "azure_proximity_placement_group"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/proximityPlacementGroups",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # the API response carries no timestamps, so these base-resource fields stay None
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "availability_sets": S("properties", "availabilitySets")
        >> ForallBend(AzureSubResourceWithColocationStatus.mapping),
        "colocation_status": S("properties", "colocationStatus") >> Bend(AzureInstanceViewStatus.mapping),
        "intent": S("properties", "intent") >> Bend(AzureVmSizes.mapping),
        "proximity_placement_group_type": S("properties", "proximityPlacementGroupType"),
        "virtual_machine_scale_sets": S("properties", "virtualMachineScaleSets")
        >> ForallBend(AzureSubResourceWithColocationStatus.mapping),
        "virtual_machines_status": S("properties", "virtualMachines")
        >> ForallBend(AzureSubResourceWithColocationStatus.mapping),
    }
    availability_sets: Optional[List[AzureSubResourceWithColocationStatus]] = field(default=None, metadata={'description': 'A list of references to all availability sets in the proximity placement group.'})  # fmt: skip
    colocation_status: Optional[AzureInstanceViewStatus] = field(default=None, metadata={'description': 'Instance view status.'})  # fmt: skip
    intent: Optional[AzureVmSizes] = field(default=None, metadata={'description': 'Specifies the user intent of the proximity placement group.'})  # fmt: skip
    proximity_placement_group_type: Optional[str] = field(default=None, metadata={'description': 'Specifies the type of the proximity placement group. Possible values are: **standard** : co-locate resources within an azure region or availability zone. **ultra** : for future use.'})  # fmt: skip
    virtual_machine_scale_sets: Optional[List[AzureSubResourceWithColocationStatus]] = field(default=None, metadata={'description': 'A list of references to all virtual machine scale sets in the proximity placement group.'})  # fmt: skip
    virtual_machines_status: Optional[List[AzureSubResourceWithColocationStatus]] = field(default=None, metadata={'description': 'A list of references to all virtual machines in the proximity placement group.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureResourceSkuCapacity:
    """Scaling information of a SKU: default/min/max capacity and the scale type."""

    kind: ClassVar[str] = "azure_resource_sku_capacity"
    mapping: ClassVar[Dict[str, Bender]] = {
        "default": S("default"),
        "maximum": S("maximum"),
        "minimum": S("minimum"),
        "scale_type": S("scaleType"),
    }
    default: Optional[int] = field(default=None, metadata={"description": "The default capacity."})
    maximum: Optional[int] = field(default=None, metadata={"description": "The maximum capacity that can be set."})
    minimum: Optional[int] = field(default=None, metadata={"description": "The minimum capacity."})
    scale_type: Optional[str] = field(default=None, metadata={"description": "The scale type applicable to the sku."})
@define(eq=False, slots=False)
class AzureResourceSkuCapabilities:
    """A single name/value capability pair of a SKU."""

    kind: ClassVar[str] = "azure_resource_sku_capabilities"
    mapping: ClassVar[Dict[str, Bender]] = {"name": S("name"), "value": S("value")}
    name: Optional[str] = field(default=None, metadata={"description": "An invariant to describe the feature."})
    value: Optional[str] = field(default=None, metadata={'description': 'An invariant if the feature is measured by quantity.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureResourceSkuZoneDetails:
    """Per-zone capability details of a SKU (`name` is the set of zones, not a single zone name)."""

    kind: ClassVar[str] = "azure_resource_sku_zone_details"
    mapping: ClassVar[Dict[str, Bender]] = {
        "capabilities": S("capabilities") >> ForallBend(AzureResourceSkuCapabilities.mapping),
        "name": S("name"),
    }
    capabilities: Optional[List[AzureResourceSkuCapabilities]] = field(default=None, metadata={'description': 'A list of capabilities that are available for the sku in the specified list of zones.'})  # fmt: skip
    name: Optional[List[str]] = field(default=None, metadata={'description': 'The set of zones that the sku is available in with the specified capabilities.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureResourceSkuLocationInfo:
    """Availability of a SKU in one location: zones, zone details, and extended locations."""

    kind: ClassVar[str] = "azure_resource_sku_location_info"
    mapping: ClassVar[Dict[str, Bender]] = {
        "extended_locations": S("extendedLocations"),
        "location": S("location"),
        "type": S("type"),
        "zone_details": S("zoneDetails") >> ForallBend(AzureResourceSkuZoneDetails.mapping),
        "zones": S("zones"),
    }
    extended_locations: Optional[List[str]] = field(default=None, metadata={'description': 'The names of extended locations.'})  # fmt: skip
    location: Optional[str] = field(default=None, metadata={"description": "Location of the sku."})
    type: Optional[str] = field(default=None, metadata={"description": "The type of the extended location."})
    zone_details: Optional[List[AzureResourceSkuZoneDetails]] = field(default=None, metadata={'description': 'Details of capabilities available to a sku in specific zones.'})  # fmt: skip
    zones: Optional[List[str]] = field(default=None, metadata={'description': 'List of availability zones where the sku is supported.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureResourceSkuCosts:
    """Billing metadata of a SKU, used to look up the price via the commerce meter id."""

    kind: ClassVar[str] = "azure_resource_sku_costs"
    mapping: ClassVar[Dict[str, Bender]] = {
        "extended_unit": S("extendedUnit"),
        # note: the API spells this key with an upper-case "ID"
        "meter_id": S("meterID"),
        "quantity": S("quantity"),
    }
    extended_unit: Optional[str] = field(default=None, metadata={'description': 'An invariant to show the extended unit.'})  # fmt: skip
    meter_id: Optional[str] = field(default=None, metadata={"description": "Used for querying price from commerce."})
    quantity: Optional[int] = field(default=None, metadata={'description': 'The multiplier is needed to extend the base metered cost.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureResourceSkuRestrictionInfo:
    """The locations and zones a SKU restriction applies to."""

    kind: ClassVar[str] = "azure_resource_sku_restriction_info"
    mapping: ClassVar[Dict[str, Bender]] = {"locations": S("locations"), "zones": S("zones")}
    locations: Optional[List[str]] = field(
        default=None, metadata={"description": "Locations where the sku is restricted."}
    )
    zones: Optional[List[str]] = field(default=None, metadata={'description': 'List of availability zones where the sku is restricted.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureResourceSkuRestrictions:
    """A restriction on a SKU: why it is restricted, of what type, and for which values."""

    kind: ClassVar[str] = "azure_resource_sku_restrictions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "reason_code": S("reasonCode"),
        "restriction_info": S("restrictionInfo") >> Bend(AzureResourceSkuRestrictionInfo.mapping),
        "type": S("type"),
        "values": S("values"),
    }
    reason_code: Optional[str] = field(default=None, metadata={"description": "The reason for restriction."})
    restriction_info: Optional[AzureResourceSkuRestrictionInfo] = field(default=None, metadata={'description': 'Describes an available compute sku restriction information.'})  # fmt: skip
    type: Optional[str] = field(default=None, metadata={"description": "The type of restrictions."})
    values: Optional[List[str]] = field(default=None, metadata={'description': 'The value of restrictions. If the restriction type is set to location. This would be different locations where the sku is restricted.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureResourceSku(AzureResource):
    """An available compute SKU, collected via the Microsoft.Compute/skus list API.

    Note: the SKU list response has no resource id, so `id` is mapped to a constant None
    (presumably an identifier is synthesized elsewhere — TODO confirm against AzureResource).
    """

    kind: ClassVar[str] = "azure_resource_sku"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2021-07-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": K(None),
        "tags": S("tags", default={}),
        "name": S("name"),
        # the API response carries no timestamps, so these base-resource fields stay None
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "api_versions": S("apiVersions"),
        "capabilities": S("capabilities") >> ForallBend(AzureResourceSkuCapabilities.mapping),
        "capacity": S("capacity") >> Bend(AzureResourceSkuCapacity.mapping),
        "costs": S("costs") >> ForallBend(AzureResourceSkuCosts.mapping),
        "family": S("family"),
        # "kind"/"size"/"tier" are prefixed to avoid clashing with base-resource field names
        "sku_kind": S("kind"),
        "location_info": S("locationInfo") >> ForallBend(AzureResourceSkuLocationInfo.mapping),
        "locations": S("locations"),
        "resource_type": S("resourceType"),
        "restrictions": S("restrictions") >> ForallBend(AzureResourceSkuRestrictions.mapping),
        "sku_size": S("size"),
        "sku_tier": S("tier"),
    }
    api_versions: Optional[List[str]] = field(default=None, metadata={'description': 'The api versions that support this sku.'})  # fmt: skip
    capabilities: Optional[List[AzureResourceSkuCapabilities]] = field(default=None, metadata={'description': 'A name value pair to describe the capability.'})  # fmt: skip
    capacity: Optional[AzureResourceSkuCapacity] = field(default=None, metadata={'description': 'Describes scaling information of a sku.'})  # fmt: skip
    costs: Optional[List[AzureResourceSkuCosts]] = field(default=None, metadata={'description': 'Metadata for retrieving price info.'})  # fmt: skip
    family: Optional[str] = field(default=None, metadata={"description": "The family of this particular sku."})
    sku_kind: Optional[str] = field(default=None, metadata={'description': 'The kind of resources that are supported in this sku.'})  # fmt: skip
    location_info: Optional[List[AzureResourceSkuLocationInfo]] = field(default=None, metadata={'description': 'A list of locations and availability zones in those locations where the sku is available.'})  # fmt: skip
    locations: Optional[List[str]] = field(default=None, metadata={'description': 'The set of locations that the sku is available.'})  # fmt: skip
    resource_type: Optional[str] = field(default=None, metadata={'description': 'The type of resource the sku applies to.'})  # fmt: skip
    restrictions: Optional[List[AzureResourceSkuRestrictions]] = field(default=None, metadata={'description': 'The restrictions because of which sku cannot be used. This is empty if there are no restrictions.'})  # fmt: skip
    sku_size: Optional[str] = field(default=None, metadata={"description": "The size of the sku."})
    sku_tier: Optional[str] = field(default=None, metadata={'description': 'Specifies the tier of virtual machines in a scale set. Possible values: **standard** **basic**.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureRestorePointCollectionSourceProperties:
    """Id and location of the source resource of a restore point collection."""

    kind: ClassVar[str] = "azure_restore_point_collection_source_properties"
    mapping: ClassVar[Dict[str, Bender]] = {"id": S("id"), "location": S("location")}
    id: Optional[str] = field(default=None, metadata={'description': 'Resource id of the source resource used to create this restore point collection.'})  # fmt: skip
    location: Optional[str] = field(default=None, metadata={'description': 'Location of the source resource used to create this restore point collection.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureProxyResource:
    """Minimal proxy-resource envelope: id, name, and type only."""

    kind: ClassVar[str] = "azure_proxy_resource"
    mapping: ClassVar[Dict[str, Bender]] = {"id": S("id"), "name": S("name"), "type": S("type")}
    id: Optional[str] = field(default=None, metadata={"description": "Resource id."})
    name: Optional[str] = field(default=None, metadata={"description": "Resource name."})
    type: Optional[str] = field(default=None, metadata={"description": "Resource type."})
@define(eq=False, slots=False)
class AzureVMSizeProperties:
    """vCPU customization of a VM size: available vCPUs and vCPUs per physical core."""

    kind: ClassVar[str] = "azure_vm_size_properties"
    mapping: ClassVar[Dict[str, Bender]] = {
        "v_cp_us_available": S("vCPUsAvailable"),
        "v_cp_us_per_core": S("vCPUsPerCore"),
    }
    v_cp_us_available: Optional[int] = field(default=None, metadata={'description': 'Specifies the number of vcpus available for the vm. When this property is not specified in the request body the default behavior is to set it to the value of vcpus available for that vm size exposed in api response of [list all available virtual machine sizes in a region](https://docs. Microsoft. Com/en-us/rest/api/compute/resource-skus/list).'})  # fmt: skip
    v_cp_us_per_core: Optional[int] = field(default=None, metadata={'description': 'Specifies the vcpu to physical core ratio. When this property is not specified in the request body the default behavior is set to the value of vcpuspercore for the vm size exposed in api response of [list all available virtual machine sizes in a region](https://docs. Microsoft. Com/en-us/rest/api/compute/resource-skus/list). **setting this property to 1 also means that hyper-threading is disabled. **.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureHardwareProfile:
    """Hardware profile of a VM: the size name and optional vCPU size properties."""

    kind: ClassVar[str] = "azure_hardware_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "vm_size": S("vmSize"),
        "vm_size_properties": S("vmSizeProperties") >> Bend(AzureVMSizeProperties.mapping),
    }
    vm_size: Optional[str] = field(default=None, metadata={'description': 'Specifies the size of the virtual machine. The enum data type is currently deprecated and will be removed by december 23rd 2023. The recommended way to get the list of available sizes is using these apis: [list all available virtual machine sizes in an availability set](https://docs. Microsoft. Com/rest/api/compute/availabilitysets/listavailablesizes), [list all available virtual machine sizes in a region]( https://docs. Microsoft. Com/rest/api/compute/resourceskus/list), [list all available virtual machine sizes for resizing](https://docs. Microsoft. Com/rest/api/compute/virtualmachines/listavailablesizes). For more information about virtual machine sizes, see [sizes for virtual machines](https://docs. Microsoft. Com/azure/virtual-machines/sizes). The available vm sizes depend on region and availability set.'})  # fmt: skip
    vm_size_properties: Optional[AzureVMSizeProperties] = field(default=None, metadata={'description': 'Specifies vm size property settings on the virtual machine.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureKeyVaultSecretReference:
    """Reference to a Key Vault secret: the secret URL plus the vault's resource id."""

    kind: ClassVar[str] = "azure_key_vault_secret_reference"
    # flatten the sourceVault sub-resource to its id string
    mapping: ClassVar[Dict[str, Bender]] = {"secret_url": S("secretUrl"), "source_vault": S("sourceVault", "id")}
    secret_url: Optional[str] = field(default=None, metadata={'description': 'The url referencing a secret in a key vault.'})  # fmt: skip
    source_vault: Optional[str] = field(default=None, metadata={"description": ""})
@define(eq=False, slots=False)
class AzureKeyVaultKeyReference:
    """Reference to a Key Vault key-encryption key: the key URL plus the vault's resource id."""

    kind: ClassVar[str] = "azure_key_vault_key_reference"
    # flatten the sourceVault sub-resource to its id string
    mapping: ClassVar[Dict[str, Bender]] = {"key_url": S("keyUrl"), "source_vault": S("sourceVault", "id")}
    key_url: Optional[str] = field(default=None, metadata={'description': 'The url referencing a key encryption key in key vault.'})  # fmt: skip
    source_vault: Optional[str] = field(default=None, metadata={"description": ""})
@define(eq=False, slots=False)
class AzureDiskEncryptionSettings:
    """Azure Disk Encryption settings: disk encryption key, optional key-encryption key, and enabled flag."""

    kind: ClassVar[str] = "azure_disk_encryption_settings"
    mapping: ClassVar[Dict[str, Bender]] = {
        "disk_encryption_key": S("diskEncryptionKey") >> Bend(AzureKeyVaultSecretReference.mapping),
        "enabled": S("enabled"),
        "key_encryption_key": S("keyEncryptionKey") >> Bend(AzureKeyVaultKeyReference.mapping),
    }
    disk_encryption_key: Optional[AzureKeyVaultSecretReference] = field(default=None, metadata={'description': 'Describes a reference to key vault secret.'})  # fmt: skip
    enabled: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether disk encryption should be enabled on the virtual machine.'})  # fmt: skip
    key_encryption_key: Optional[AzureKeyVaultKeyReference] = field(default=None, metadata={'description': 'Describes a reference to key vault key.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVMDiskSecurityProfile:
    """Security profile of a managed disk: encryption set reference and security encryption type."""

    kind: ClassVar[str] = "azure_vm_disk_security_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "disk_encryption_set": S("diskEncryptionSet") >> Bend(AzureDiskEncryptionSetParameters.mapping),
        "security_encryption_type": S("securityEncryptionType"),
    }
    disk_encryption_set: Optional[AzureDiskEncryptionSetParameters] = field(default=None, metadata={'description': 'Describes the parameter of customer managed disk encryption set resource id that can be specified for disk. **note:** the disk encryption set resource id can only be specified for managed disk. Please refer https://aka. Ms/mdssewithcmkoverview for more details.'})  # fmt: skip
    security_encryption_type: Optional[str] = field(default=None, metadata={'description': 'Specifies the encryptiontype of the managed disk. It is set to diskwithvmgueststate for encryption of the managed disk along with vmgueststate blob, and vmgueststateonly for encryption of just the vmgueststate blob. **note:** it can be set for only confidential vms.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureManagedDiskParameters(AzureSubResource):
    """Parameters of a managed disk: encryption set, security profile, and storage account type."""

    kind: ClassVar[str] = "azure_managed_disk_parameters"
    mapping: ClassVar[Dict[str, Bender]] = {
        "disk_encryption_set": S("diskEncryptionSet") >> Bend(AzureDiskEncryptionSetParameters.mapping),
        "disk_parameters_security_profile": S("securityProfile") >> Bend(AzureVMDiskSecurityProfile.mapping),
        "storage_account_type": S("storageAccountType"),
    }
    disk_encryption_set: Optional[AzureDiskEncryptionSetParameters] = field(default=None, metadata={'description': 'Describes the parameter of customer managed disk encryption set resource id that can be specified for disk. **note:** the disk encryption set resource id can only be specified for managed disk. Please refer https://aka. Ms/mdssewithcmkoverview for more details.'})  # fmt: skip
    disk_parameters_security_profile: Optional[AzureVMDiskSecurityProfile] = field(default=None, metadata={'description': 'Specifies the security profile settings for the managed disk. **note:** it can only be set for confidential vms.'})  # fmt: skip
    storage_account_type: Optional[str] = field(default=None, metadata={'description': 'Specifies the storage account type for the managed disk. Managed os disk storage account type can only be set when you create the scale set. Note: ultrassd_lrs can only be used with data disks. It cannot be used with os disk. Standard_lrs uses standard hdd. Standardssd_lrs uses standard ssd. Premium_lrs uses premium ssd. Ultrassd_lrs uses ultra disk. Premium_zrs uses premium ssd zone redundant storage. Standardssd_zrs uses standard ssd zone redundant storage. For more information regarding disks supported for windows virtual machines, refer to https://docs. Microsoft. Com/azure/virtual-machines/windows/disks-types and, for linux virtual machines, refer to https://docs. Microsoft. Com/azure/virtual-machines/linux/disks-types.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureSubResourceReadOnly:
    """Read-only sub-resource reference: just the resource id."""

    kind: ClassVar[str] = "azure_sub_resource_read_only"
    mapping: ClassVar[Dict[str, Bender]] = {"id": S("id")}
    id: Optional[str] = field(default=None, metadata={"description": "Resource id."})
@define(eq=False, slots=False)
class AzureRestorePointEncryption:
    """Encryption-at-rest settings of a disk restore point."""

    kind: ClassVar[str] = "azure_restore_point_encryption"
    mapping: ClassVar[Dict[str, Bender]] = {
        "disk_encryption_set": S("diskEncryptionSet") >> Bend(AzureDiskEncryptionSetParameters.mapping),
        "type": S("type"),
    }
    disk_encryption_set: Optional[AzureDiskEncryptionSetParameters] = field(default=None, metadata={'description': 'Describes the parameter of customer managed disk encryption set resource id that can be specified for disk. **note:** the disk encryption set resource id can only be specified for managed disk. Please refer https://aka. Ms/mdssewithcmkoverview for more details.'})  # fmt: skip
    type: Optional[str] = field(default=None, metadata={'description': 'The type of key used to encrypt the data of the disk restore point.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureDiskRestorePointAttributes(AzureSubResourceReadOnly):
    """Disk restore point details: encryption settings and source restore point reference."""

    kind: ClassVar[str] = "azure_disk_restore_point_attributes"
    mapping: ClassVar[Dict[str, Bender]] = {
        "encryption": S("encryption") >> Bend(AzureRestorePointEncryption.mapping),
        # flatten the source restore point sub-resource to its id string
        "source_disk_restore_point": S("sourceDiskRestorePoint", "id"),
    }
    encryption: Optional[AzureRestorePointEncryption] = field(default=None, metadata={'description': 'Encryption at rest settings for disk restore point. It is an optional property that can be specified in the input while creating a restore point.'})  # fmt: skip
    source_disk_restore_point: Optional[str] = field(default=None, metadata={'description': 'The api entity reference.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureRestorePointSourceVMOSDisk:
    """OS disk of the source VM captured at restore point creation time."""

    kind: ClassVar[str] = "azure_restore_point_source_vmos_disk"
    mapping: ClassVar[Dict[str, Bender]] = {
        "caching": S("caching"),
        "disk_restore_point": S("diskRestorePoint") >> Bend(AzureDiskRestorePointAttributes.mapping),
        "disk_size_gb": S("diskSizeGB"),
        "encryption_settings": S("encryptionSettings") >> Bend(AzureDiskEncryptionSettings.mapping),
        "managed_disk": S("managedDisk") >> Bend(AzureManagedDiskParameters.mapping),
        "name": S("name"),
        "os_type": S("osType"),
        "write_accelerator_enabled": S("writeAcceleratorEnabled"),
    }
    caching: Optional[str] = field(default=None, metadata={'description': 'Specifies the caching requirements. Possible values are: **none,** **readonly,** **readwrite. ** the default values are: **none for standard storage. Readonly for premium storage**.'})  # fmt: skip
    disk_restore_point: Optional[AzureDiskRestorePointAttributes] = field(default=None, metadata={'description': 'Disk restore point details.'})  # fmt: skip
    disk_size_gb: Optional[int] = field(default=None, metadata={"description": "Gets the disk size in gb."})
    encryption_settings: Optional[AzureDiskEncryptionSettings] = field(default=None, metadata={'description': 'Describes a encryption settings for a disk.'})  # fmt: skip
    managed_disk: Optional[AzureManagedDiskParameters] = field(default=None, metadata={'description': 'The parameters of a managed disk.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "Gets the disk name."})
    os_type: Optional[str] = field(default=None, metadata={"description": "Gets the operating system type."})
    write_accelerator_enabled: Optional[bool] = field(default=None, metadata={'description': 'Shows true if the disk is write-accelerator enabled.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureRestorePointSourceVMDataDisk:
    """A data disk of the source VM captured at restore point creation time."""

    kind: ClassVar[str] = "azure_restore_point_source_vm_data_disk"
    mapping: ClassVar[Dict[str, Bender]] = {
        "caching": S("caching"),
        "disk_restore_point": S("diskRestorePoint") >> Bend(AzureDiskRestorePointAttributes.mapping),
        "disk_size_gb": S("diskSizeGB"),
        "lun": S("lun"),
        "managed_disk": S("managedDisk") >> Bend(AzureManagedDiskParameters.mapping),
        "name": S("name"),
        "write_accelerator_enabled": S("writeAcceleratorEnabled"),
    }
    caching: Optional[str] = field(default=None, metadata={'description': 'Specifies the caching requirements. Possible values are: **none,** **readonly,** **readwrite. ** the default values are: **none for standard storage. Readonly for premium storage**.'})  # fmt: skip
    disk_restore_point: Optional[AzureDiskRestorePointAttributes] = field(default=None, metadata={'description': 'Disk restore point details.'})  # fmt: skip
    disk_size_gb: Optional[int] = field(default=None, metadata={'description': 'Gets the initial disk size in gb for blank data disks, and the new desired size for existing os and data disks.'})  # fmt: skip
    lun: Optional[int] = field(default=None, metadata={"description": "Gets the logical unit number."})
    managed_disk: Optional[AzureManagedDiskParameters] = field(default=None, metadata={'description': 'The parameters of a managed disk.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "Gets the disk name."})
    write_accelerator_enabled: Optional[bool] = field(default=None, metadata={'description': 'Shows true if the disk is write-accelerator enabled.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureRestorePointSourceVMStorageProfile:
    """Storage profile of the source VM at restore point creation: OS disk plus data disks."""

    kind: ClassVar[str] = "azure_restore_point_source_vm_storage_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "data_disks": S("dataDisks") >> ForallBend(AzureRestorePointSourceVMDataDisk.mapping),
        "os_disk": S("osDisk") >> Bend(AzureRestorePointSourceVMOSDisk.mapping),
    }
    data_disks: Optional[List[AzureRestorePointSourceVMDataDisk]] = field(default=None, metadata={'description': 'Gets the data disks of the vm captured at the time of the restore point creation.'})  # fmt: skip
    os_disk: Optional[AzureRestorePointSourceVMOSDisk] = field(default=None, metadata={'description': 'Describes an operating system disk.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureAdditionalUnattendContent:
    """Extra XML content injected into a Windows unattend.xml setup file."""

    kind: ClassVar[str] = "azure_additional_unattend_content"
    mapping: ClassVar[Dict[str, Bender]] = {
        "component_name": S("componentName"),
        "content": S("content"),
        "pass_name": S("passName"),
        "setting_name": S("settingName"),
    }
    component_name: Optional[str] = field(default=None, metadata={'description': 'The component name. Currently, the only allowable value is microsoft-windows-shell-setup.'})  # fmt: skip
    content: Optional[str] = field(default=None, metadata={'description': 'Specifies the xml formatted content that is added to the unattend. Xml file for the specified path and component. The xml must be less than 4kb and must include the root element for the setting or feature that is being inserted.'})  # fmt: skip
    pass_name: Optional[str] = field(default=None, metadata={'description': 'The pass name. Currently, the only allowable value is oobesystem.'})  # fmt: skip
    setting_name: Optional[str] = field(default=None, metadata={'description': 'Specifies the name of the setting to which the content applies. Possible values are: firstlogoncommands and autologon.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureWindowsVMGuestPatchAutomaticByPlatformSettings:
    """Extra settings applied when Windows patch mode is 'AutomaticByPlatform'."""

    kind: ClassVar[str] = "azure_windows_vm_guest_patch_automatic_by_platform_settings"
    mapping: ClassVar[Dict[str, Bender]] = {
        "bypass_platform_safety_checks_on_user_schedule": S("bypassPlatformSafetyChecksOnUserSchedule"),
        "reboot_setting": S("rebootSetting"),
    }
    bypass_platform_safety_checks_on_user_schedule: Optional[bool] = field(default=None, metadata={'description': 'Enables customer to schedule patching without accidental upgrades.'})  # fmt: skip
    reboot_setting: Optional[str] = field(default=None, metadata={'description': 'Specifies the reboot setting for all automaticbyplatform patch installation operations.'})  # fmt: skip
@define(eq=False, slots=False)
class AzurePatchSettings:
    """VM guest patching configuration: assessment mode, patch mode, hotpatching, and platform settings."""

    kind: ClassVar[str] = "azure_patch_settings"
    mapping: ClassVar[Dict[str, Bender]] = {
        "assessment_mode": S("assessmentMode"),
        "automatic_by_platform_settings": S("automaticByPlatformSettings")
        >> Bend(AzureWindowsVMGuestPatchAutomaticByPlatformSettings.mapping),
        "enable_hotpatching": S("enableHotpatching"),
        "patch_mode": S("patchMode"),
    }
    assessment_mode: Optional[str] = field(default=None, metadata={'description': 'Specifies the mode of vm guest patch assessment for the iaas virtual machine. Possible values are: **imagedefault** - you control the timing of patch assessments on a virtual machine. **automaticbyplatform** - the platform will trigger periodic patch assessments. The property provisionvmagent must be true.'})  # fmt: skip
    automatic_by_platform_settings: Optional[AzureWindowsVMGuestPatchAutomaticByPlatformSettings] = field(default=None, metadata={'description': 'Specifies additional settings to be applied when patch mode automaticbyplatform is selected in windows patch settings.'})  # fmt: skip
    enable_hotpatching: Optional[bool] = field(default=None, metadata={'description': 'Enables customers to patch their azure vms without requiring a reboot. For enablehotpatching, the provisionvmagent must be set to true and patchmode must be set to automaticbyplatform.'})  # fmt: skip
    patch_mode: Optional[str] = field(default=None, metadata={'description': 'Specifies the mode of vm guest patching to iaas virtual machine or virtual machines associated to virtual machine scale set with orchestrationmode as flexible. Possible values are: **manual** - you control the application of patches to a virtual machine. You do this by applying patches manually inside the vm. In this mode, automatic updates are disabled; the property windowsconfiguration. Enableautomaticupdates must be false **automaticbyos** - the virtual machine will automatically be updated by the os. The property windowsconfiguration. Enableautomaticupdates must be true. **automaticbyplatform** - the virtual machine will automatically updated by the platform. The properties provisionvmagent and windowsconfiguration. Enableautomaticupdates must be true.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureWinRMListener:
    """A single Windows Remote Management (WinRM) listener of an Azure VM.

    ``mapping`` declares how each attribute is extracted from the Azure API
    JSON; per-field documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_win_rm_listener"
    mapping: ClassVar[Dict[str, Bender]] = {"certificate_url": S("certificateUrl"), "protocol": S("protocol")}
    certificate_url: Optional[str] = field(default=None, metadata={'description': 'This is the url of a certificate that has been uploaded to key vault as a secret. For adding a secret to the key vault, see [add a key or secret to the key vault](https://docs. Microsoft. Com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be the base64 encoding of the following json object which is encoded in utf-8: { data : <base64-encoded-certificate> , datatype : pfx , password : <pfx-file-password> } to install certificates on a virtual machine it is recommended to use the [azure key vault virtual machine extension for linux](https://docs. Microsoft. Com/azure/virtual-machines/extensions/key-vault-linux) or the [azure key vault virtual machine extension for windows](https://docs. Microsoft. Com/azure/virtual-machines/extensions/key-vault-windows).'})  # fmt: skip
    protocol: Optional[str] = field(default=None, metadata={'description': 'Specifies the protocol of winrm listener. Possible values are: **http,** **https. **.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureWinRMConfiguration:
    """Windows Remote Management configuration of a VM: the set of WinRM listeners."""

    kind: ClassVar[str] = "azure_win_rm_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "listeners": S("listeners") >> ForallBend(AzureWinRMListener.mapping),
    }
    listeners: Optional[List[AzureWinRMListener]] = field(
        default=None,
        metadata={"description": "The list of windows remote management listeners."},
    )
@define(eq=False, slots=False)
class AzureWindowsConfiguration:
    """Windows operating-system settings of an Azure VM (unattend content,
    update/patch settings, VM agent provisioning, time zone and WinRM).

    ``mapping`` declares how attributes are extracted from the Azure API JSON;
    per-field documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_windows_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "additional_unattend_content": S("additionalUnattendContent")
        >> ForallBend(AzureAdditionalUnattendContent.mapping),
        "enable_automatic_updates": S("enableAutomaticUpdates"),
        "enable_vm_agent_platform_updates": S("enableVMAgentPlatformUpdates"),
        "patch_settings": S("patchSettings") >> Bend(AzurePatchSettings.mapping),
        "provision_vm_agent": S("provisionVMAgent"),
        "time_zone": S("timeZone"),
        "win_rm": S("winRM") >> Bend(AzureWinRMConfiguration.mapping),
    }
    additional_unattend_content: Optional[List[AzureAdditionalUnattendContent]] = field(default=None, metadata={'description': 'Specifies additional base-64 encoded xml formatted information that can be included in the unattend. Xml file, which is used by windows setup.'})  # fmt: skip
    enable_automatic_updates: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether automatic updates is enabled for the windows virtual machine. Default value is true. For virtual machine scale sets, this property can be updated and updates will take effect on os reprovisioning.'})  # fmt: skip
    enable_vm_agent_platform_updates: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether vmagent platform updates is enabled for the windows virtual machine. Default value is false.'})  # fmt: skip
    patch_settings: Optional[AzurePatchSettings] = field(default=None, metadata={'description': 'Specifies settings related to vm guest patching on windows.'})  # fmt: skip
    provision_vm_agent: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified in the request body, it is set to true by default. This will ensure that vm agent is installed on the vm so that extensions can be added to the vm later.'})  # fmt: skip
    time_zone: Optional[str] = field(default=None, metadata={'description': 'Specifies the time zone of the virtual machine. E. G. Pacific standard time. Possible values can be [timezoneinfo. Id](https://docs. Microsoft. Com/dotnet/api/system. Timezoneinfo. Id?#system_timezoneinfo_id) value from time zones returned by [timezoneinfo. Getsystemtimezones](https://docs. Microsoft. Com/dotnet/api/system. Timezoneinfo. Getsystemtimezones).'})  # fmt: skip
    win_rm: Optional[AzureWinRMConfiguration] = field(default=None, metadata={'description': 'Describes windows remote management configuration of the vm.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureSshPublicKey:
    """An SSH public key entry in a VM's Linux configuration: the key material
    plus the path on the VM where it is installed."""

    kind: ClassVar[str] = "azure_ssh_public_key"
    mapping: ClassVar[Dict[str, Bender]] = {"key_data": S("keyData"), "path": S("path")}
    key_data: Optional[str] = field(default=None, metadata={'description': 'Ssh public key certificate used to authenticate with the vm through ssh. The key needs to be at least 2048-bit and in ssh-rsa format. For creating ssh keys, see [create ssh keys on linux and mac for linux vms in azure]https://docs. Microsoft. Com/azure/virtual-machines/linux/create-ssh-keys-detailed).'})  # fmt: skip
    path: Optional[str] = field(default=None, metadata={'description': 'Specifies the full path on the created vm where ssh public key is stored. If the file already exists, the specified key is appended to the file. Example: /home/user/. Ssh/authorized_keys.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureSshConfiguration:
    """SSH settings for Linux-based Azure VMs: the authorized public keys."""

    kind: ClassVar[str] = "azure_ssh_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "public_keys": S("publicKeys") >> ForallBend(AzureSshPublicKey.mapping),
    }
    public_keys: Optional[List[AzureSshPublicKey]] = field(
        default=None,
        metadata={"description": "The list of ssh public keys used to authenticate with linux based vms."},
    )
@define(eq=False, slots=False)
class AzureLinuxVMGuestPatchAutomaticByPlatformSettings:
    """Additional settings applied when the Linux guest patch mode is "AutomaticByPlatform"."""

    kind: ClassVar[str] = "azure_linux_vm_guest_patch_automatic_by_platform_settings"
    mapping: ClassVar[Dict[str, Bender]] = {
        "bypass_platform_safety_checks_on_user_schedule": S("bypassPlatformSafetyChecksOnUserSchedule"),
        "reboot_setting": S("rebootSetting"),
    }
    bypass_platform_safety_checks_on_user_schedule: Optional[bool] = field(
        default=None,
        metadata={"description": "Enables customer to schedule patching without accidental upgrades."},
    )
    reboot_setting: Optional[str] = field(
        default=None,
        metadata={
            "description": "Specifies the reboot setting for all automaticbyplatform patch installation operations."
        },
    )
@define(eq=False, slots=False)
class AzureLinuxPatchSettings:
    """VM guest patching settings for Linux VMs (assessment mode, patch mode,
    and the platform-driven patching options).

    Per-field documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_linux_patch_settings"
    mapping: ClassVar[Dict[str, Bender]] = {
        "assessment_mode": S("assessmentMode"),
        "automatic_by_platform_settings": S("automaticByPlatformSettings")
        >> Bend(AzureLinuxVMGuestPatchAutomaticByPlatformSettings.mapping),
        "patch_mode": S("patchMode"),
    }
    assessment_mode: Optional[str] = field(default=None, metadata={'description': 'Specifies the mode of vm guest patch assessment for the iaas virtual machine. Possible values are: **imagedefault** - you control the timing of patch assessments on a virtual machine. **automaticbyplatform** - the platform will trigger periodic patch assessments. The property provisionvmagent must be true.'})  # fmt: skip
    automatic_by_platform_settings: Optional[AzureLinuxVMGuestPatchAutomaticByPlatformSettings] = field(default=None, metadata={'description': 'Specifies additional settings to be applied when patch mode automaticbyplatform is selected in linux patch settings.'})  # fmt: skip
    patch_mode: Optional[str] = field(default=None, metadata={'description': 'Specifies the mode of vm guest patching to iaas virtual machine or virtual machines associated to virtual machine scale set with orchestrationmode as flexible. Possible values are: **imagedefault** - the virtual machine s default patching configuration is used. **automaticbyplatform** - the virtual machine will be automatically updated by the platform. The property provisionvmagent must be true.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureLinuxConfiguration:
    """Linux operating-system settings of an Azure VM (password auth, VM agent,
    patch settings and SSH configuration).

    Per-field documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_linux_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "disable_password_authentication": S("disablePasswordAuthentication"),
        "enable_vm_agent_platform_updates": S("enableVMAgentPlatformUpdates"),
        "patch_settings": S("patchSettings") >> Bend(AzureLinuxPatchSettings.mapping),
        "provision_vm_agent": S("provisionVMAgent"),
        "ssh": S("ssh") >> Bend(AzureSshConfiguration.mapping),
    }
    disable_password_authentication: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether password authentication should be disabled.'})  # fmt: skip
    enable_vm_agent_platform_updates: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether vmagent platform updates is enabled for the linux virtual machine. Default value is false.'})  # fmt: skip
    patch_settings: Optional[AzureLinuxPatchSettings] = field(default=None, metadata={'description': 'Specifies settings related to vm guest patching on linux.'})  # fmt: skip
    provision_vm_agent: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified in the request body, default behavior is to set it to true. This will ensure that vm agent is installed on the vm so that extensions can be added to the vm later.'})  # fmt: skip
    ssh: Optional[AzureSshConfiguration] = field(default=None, metadata={'description': 'Ssh configuration for linux based vms running on azure.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVaultCertificate:
    """A single certificate reference (key-vault secret URL plus target
    certificate store) to be installed on a VM.

    Per-field documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_vault_certificate"
    mapping: ClassVar[Dict[str, Bender]] = {
        "certificate_store": S("certificateStore"),
        "certificate_url": S("certificateUrl"),
    }
    certificate_store: Optional[str] = field(default=None, metadata={'description': 'For windows vms, specifies the certificate store on the virtual machine to which the certificate should be added. The specified certificate store is implicitly in the localmachine account. For linux vms, the certificate file is placed under the /var/lib/waagent directory, with the file name <uppercasethumbprint>. Crt for the x509 certificate file and <uppercasethumbprint>. Prv for private key. Both of these files are. Pem formatted.'})  # fmt: skip
    certificate_url: Optional[str] = field(default=None, metadata={'description': 'This is the url of a certificate that has been uploaded to key vault as a secret. For adding a secret to the key vault, see [add a key or secret to the key vault](https://docs. Microsoft. Com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be it is the base64 encoding of the following json object which is encoded in utf-8: { data : <base64-encoded-certificate> , datatype : pfx , password : <pfx-file-password> } to install certificates on a virtual machine it is recommended to use the [azure key vault virtual machine extension for linux](https://docs. Microsoft. Com/azure/virtual-machines/extensions/key-vault-linux) or the [azure key vault virtual machine extension for windows](https://docs. Microsoft. Com/azure/virtual-machines/extensions/key-vault-windows).'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVaultSecretGroup:
    """A source key vault (by resource id) together with the certificates from it
    that should be installed on the VM."""

    kind: ClassVar[str] = "azure_vault_secret_group"
    mapping: ClassVar[Dict[str, Bender]] = {
        "source_vault": S("sourceVault", "id"),
        "vault_certificates": S("vaultCertificates") >> ForallBend(AzureVaultCertificate.mapping),
    }
    # Resource id of the key vault (only the nested "id" is extracted — see mapping).
    source_vault: Optional[str] = field(default=None, metadata={"description": ""})
    vault_certificates: Optional[List[AzureVaultCertificate]] = field(
        default=None,
        metadata={"description": "The list of key vault references in sourcevault which contain certificates."},
    )
@define(eq=False, slots=False)
class AzureOSProfile:
    """Operating-system profile of an Azure VM: admin credentials, computer
    name, custom data, certificates and the OS-specific (Linux/Windows)
    configuration blocks.

    Per-field documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_os_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "admin_password": S("adminPassword"),
        "admin_username": S("adminUsername"),
        "allow_extension_operations": S("allowExtensionOperations"),
        "computer_name": S("computerName"),
        "custom_data": S("customData"),
        "linux_configuration": S("linuxConfiguration") >> Bend(AzureLinuxConfiguration.mapping),
        "require_guest_provision_signal": S("requireGuestProvisionSignal"),
        "secrets": S("secrets") >> ForallBend(AzureVaultSecretGroup.mapping),
        "windows_configuration": S("windowsConfiguration") >> Bend(AzureWindowsConfiguration.mapping),
    }
    admin_password: Optional[str] = field(default=None, metadata={'description': 'Specifies the password of the administrator account. **minimum-length (windows):** 8 characters **minimum-length (linux):** 6 characters **max-length (windows):** 123 characters **max-length (linux):** 72 characters **complexity requirements:** 3 out of 4 conditions below need to be fulfilled has lower characters has upper characters has a digit has a special character (regex match [\\w_]) **disallowed values:** abc@123 , p@$$w0rd , p@ssw0rd , p@ssword123 , pa$$word , pass@word1 , password! , password1 , password22 , iloveyou! for resetting the password, see [how to reset the remote desktop service or its login password in a windows vm](https://docs. Microsoft. Com/troubleshoot/azure/virtual-machines/reset-rdp) for resetting root password, see [manage users, ssh, and check or repair disks on azure linux vms using the vmaccess extension](https://docs. Microsoft. Com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection).'})  # fmt: skip
    admin_username: Optional[str] = field(default=None, metadata={'description': 'Specifies the name of the administrator account. This property cannot be updated after the vm is created. **windows-only restriction:** cannot end in. **disallowed values:** administrator , admin , user , user1 , test , user2 , test1 , user3 , admin1 , 1 , 123 , a , actuser , adm , admin2 , aspnet , backup , console , david , guest , john , owner , root , server , sql , support , support_388945a0 , sys , test2 , test3 , user4 , user5. **minimum-length (linux):** 1 character **max-length (linux):** 64 characters **max-length (windows):** 20 characters.'})  # fmt: skip
    allow_extension_operations: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether extension operations should be allowed on the virtual machine. This may only be set to false when no extensions are present on the virtual machine.'})  # fmt: skip
    computer_name: Optional[str] = field(default=None, metadata={'description': 'Specifies the host os name of the virtual machine. This name cannot be updated after the vm is created. **max-length (windows):** 15 characters. **max-length (linux):** 64 characters. For naming conventions and restrictions see [azure infrastructure services implementation guidelines](https://docs. Microsoft. Com/azure/azure-resource-manager/management/resource-name-rules).'})  # fmt: skip
    custom_data: Optional[str] = field(default=None, metadata={'description': 'Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the virtual machine. The maximum length of the binary array is 65535 bytes. **note: do not pass any secrets or passwords in customdata property. ** this property cannot be updated after the vm is created. The property customdata is passed to the vm to be saved as a file, for more information see [custom data on azure vms](https://azure. Microsoft. Com/blog/custom-data-and-cloud-init-on-windows-azure/). For using cloud-init for your linux vm, see [using cloud-init to customize a linux vm during creation](https://docs. Microsoft. Com/azure/virtual-machines/linux/using-cloud-init).'})  # fmt: skip
    linux_configuration: Optional[AzureLinuxConfiguration] = field(default=None, metadata={'description': 'Specifies the linux operating system settings on the virtual machine. For a list of supported linux distributions, see [linux on azure-endorsed distributions](https://docs. Microsoft. Com/azure/virtual-machines/linux/endorsed-distros).'})  # fmt: skip
    require_guest_provision_signal: Optional[bool] = field(default=None, metadata={'description': 'Optional property which must either be set to true or omitted.'})  # fmt: skip
    secrets: Optional[List[AzureVaultSecretGroup]] = field(default=None, metadata={'description': 'Specifies set of certificates that should be installed onto the virtual machine. To install certificates on a virtual machine it is recommended to use the [azure key vault virtual machine extension for linux](https://docs. Microsoft. Com/azure/virtual-machines/extensions/key-vault-linux) or the [azure key vault virtual machine extension for windows](https://docs. Microsoft. Com/azure/virtual-machines/extensions/key-vault-windows).'})  # fmt: skip
    windows_configuration: Optional[AzureWindowsConfiguration] = field(default=None, metadata={'description': 'Specifies windows operating system settings on the virtual machine.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureBootDiagnostics:
    """Boot-diagnostics switch of a VM plus the storage account URI used for output."""

    kind: ClassVar[str] = "azure_boot_diagnostics"
    mapping: ClassVar[Dict[str, Bender]] = {
        "enabled": S("enabled"),
        "storage_uri": S("storageUri"),
    }
    enabled: Optional[bool] = field(
        default=None,
        metadata={"description": "Whether boot diagnostics should be enabled on the virtual machine."},
    )
    storage_uri: Optional[str] = field(
        default=None,
        metadata={
            "description": "Uri of the storage account to use for placing the console output and screenshot. If storageuri is not specified while enabling boot diagnostics, managed storage will be used."
        },
    )
@define(eq=False, slots=False)
class AzureDiagnosticsProfile:
    """Diagnostics settings of a VM; currently only the boot-diagnostics block."""

    kind: ClassVar[str] = "azure_diagnostics_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "boot_diagnostics": S("bootDiagnostics") >> Bend(AzureBootDiagnostics.mapping),
    }
    boot_diagnostics: Optional[AzureBootDiagnostics] = field(
        default=None,
        metadata={
            "description": "Boot diagnostics is a debugging feature which allows you to view console output and screenshot to diagnose vm status. You can easily view the output of your console log. Azure also enables you to see a screenshot of the vm from the hypervisor."
        },
    )
@define(eq=False, slots=False)
class AzureUefiSettings:
    """UEFI security options of a VM: secure boot and virtual TPM flags."""

    kind: ClassVar[str] = "azure_uefi_settings"
    mapping: ClassVar[Dict[str, Bender]] = {
        "secure_boot_enabled": S("secureBootEnabled"),
        "v_tpm_enabled": S("vTpmEnabled"),
    }
    secure_boot_enabled: Optional[bool] = field(
        default=None,
        metadata={
            "description": "Specifies whether secure boot should be enabled on the virtual machine. Minimum api-version: 2020-12-01."
        },
    )
    v_tpm_enabled: Optional[bool] = field(
        default=None,
        metadata={
            "description": "Specifies whether vtpm should be enabled on the virtual machine. Minimum api-version: 2020-12-01."
        },
    )
@define(eq=False, slots=False)
class AzureSecurityProfile:
    """Security profile of a VM / scale set: host encryption, security type
    and UEFI settings.

    Per-field documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_security_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "encryption_at_host": S("encryptionAtHost"),
        "security_type": S("securityType"),
        "uefi_settings": S("uefiSettings") >> Bend(AzureUefiSettings.mapping),
    }
    encryption_at_host: Optional[bool] = field(default=None, metadata={'description': 'This property can be used by user in the request to enable or disable the host encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including resource/temp disk at host itself. The default behavior is: the encryption at host will be disabled unless this property is set to true for the resource.'})  # fmt: skip
    security_type: Optional[str] = field(default=None, metadata={'description': 'Specifies the securitytype of the virtual machine. It has to be set to any specified value to enable uefisettings. The default behavior is: uefisettings will not be enabled unless this property is set.'})  # fmt: skip
    uefi_settings: Optional[AzureUefiSettings] = field(default=None, metadata={'description': 'Specifies the security settings like secure boot and vtpm used while creating the virtual machine. Minimum api-version: 2020-12-01.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureRestorePointSourceMetadata:
    """Snapshot of the source VM's properties captured when a restore point
    was created (hardware/OS/security/storage profiles, location, vm id).

    Per-field documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_restore_point_source_metadata"
    mapping: ClassVar[Dict[str, Bender]] = {
        "diagnostics_profile": S("diagnosticsProfile") >> Bend(AzureDiagnosticsProfile.mapping),
        "hardware_profile": S("hardwareProfile") >> Bend(AzureHardwareProfile.mapping),
        "hyper_v_generation": S("hyperVGeneration"),
        "license_type": S("licenseType"),
        "location": S("location"),
        "os_profile": S("osProfile") >> Bend(AzureOSProfile.mapping),
        "security_profile": S("securityProfile") >> Bend(AzureSecurityProfile.mapping),
        "storage_profile": S("storageProfile") >> Bend(AzureRestorePointSourceVMStorageProfile.mapping),
        "user_data": S("userData"),
        "vm_id": S("vmId"),
    }
    diagnostics_profile: Optional[AzureDiagnosticsProfile] = field(default=None, metadata={'description': 'Specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15.'})  # fmt: skip
    hardware_profile: Optional[AzureHardwareProfile] = field(default=None, metadata={'description': 'Specifies the hardware settings for the virtual machine.'})  # fmt: skip
    hyper_v_generation: Optional[str] = field(default=None, metadata={'description': 'Specifies the hypervgeneration type.'})  # fmt: skip
    license_type: Optional[str] = field(default=None, metadata={'description': 'Gets the license type, which is for bring your own license scenario.'})  # fmt: skip
    location: Optional[str] = field(default=None, metadata={'description': 'Location of the vm from which the restore point was created.'})  # fmt: skip
    os_profile: Optional[AzureOSProfile] = field(default=None, metadata={'description': 'Specifies the operating system settings for the virtual machine. Some of the settings cannot be changed once vm is provisioned.'})  # fmt: skip
    security_profile: Optional[AzureSecurityProfile] = field(default=None, metadata={'description': 'Specifies the security profile settings for the virtual machine or virtual machine scale set.'})  # fmt: skip
    storage_profile: Optional[AzureRestorePointSourceVMStorageProfile] = field(default=None, metadata={'description': 'Describes the storage profile.'})  # fmt: skip
    user_data: Optional[str] = field(default=None, metadata={'description': 'Userdata associated with the source vm for which restore point is captured, which is a base-64 encoded value.'})  # fmt: skip
    vm_id: Optional[str] = field(default=None, metadata={"description": "Gets the virtual machine unique id."})
@define(eq=False, slots=False)
class AzureDiskRestorePointReplicationStatus:
    """Replication progress of a single disk restore point."""

    kind: ClassVar[str] = "azure_disk_restore_point_replication_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "completion_percent": S("completionPercent"),
        "status": S("status") >> Bend(AzureInstanceViewStatus.mapping),
    }
    completion_percent: Optional[int] = field(
        default=None,
        metadata={"description": "Replication completion percentage."},
    )
    status: Optional[AzureInstanceViewStatus] = field(
        default=None,
        metadata={"description": "Instance view status."},
    )
@define(eq=False, slots=False)
class AzureDiskRestorePointInstanceView:
    """Instance view of one disk restore point: its id and replication status."""

    kind: ClassVar[str] = "azure_disk_restore_point_instance_view"
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "replication_status": S("replicationStatus") >> Bend(AzureDiskRestorePointReplicationStatus.mapping),
    }
    # NOTE: attribute name mirrors the API field and shadows builtin id() within this class body.
    id: Optional[str] = field(default=None, metadata={"description": "Disk restore point id."})
    replication_status: Optional[AzureDiskRestorePointReplicationStatus] = field(
        default=None,
        metadata={"description": "The instance view of a disk restore point."},
    )
@define(eq=False, slots=False)
class AzureRestorePointInstanceView:
    """Instance view of a restore point: per-disk restore points plus overall statuses."""

    kind: ClassVar[str] = "azure_restore_point_instance_view"
    mapping: ClassVar[Dict[str, Bender]] = {
        "disk_restore_points": S("diskRestorePoints") >> ForallBend(AzureDiskRestorePointInstanceView.mapping),
        "statuses": S("statuses") >> ForallBend(AzureInstanceViewStatus.mapping),
    }
    disk_restore_points: Optional[List[AzureDiskRestorePointInstanceView]] = field(
        default=None,
        metadata={"description": "The disk restore points information."},
    )
    statuses: Optional[List[AzureInstanceViewStatus]] = field(
        default=None,
        metadata={"description": "The resource status information."},
    )
@define(eq=False, slots=False)
class AzureRestorePoint(AzureProxyResource):
    """A single VM restore point: excluded disks, instance view, captured
    source-VM metadata and creation time.

    Per-field documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_restore_point"
    mapping: ClassVar[Dict[str, Bender]] = {
        "consistency_mode": S("properties", "consistencyMode"),
        # excludeDisks entries are {"id": ...} references; only the ids are kept.
        "exclude_disks": S("properties") >> S("excludeDisks", default=[]) >> ForallBend(S("id")),
        "restore_point_instance_view": S("properties", "instanceView") >> Bend(AzureRestorePointInstanceView.mapping),
        "provisioning_state": S("properties", "provisioningState"),
        "source_metadata": S("properties", "sourceMetadata") >> Bend(AzureRestorePointSourceMetadata.mapping),
        "source_restore_point": S("properties", "sourceRestorePoint", "id"),
        "time_created": S("properties", "timeCreated"),
    }
    consistency_mode: Optional[str] = field(default=None, metadata={'description': 'Consistencymode of the restorepoint. Can be specified in the input while creating a restore point. For now, only crashconsistent is accepted as a valid input. Please refer to https://aka. Ms/restorepoints for more details.'})  # fmt: skip
    exclude_disks: Optional[List[str]] = field(default=None, metadata={'description': 'List of disk resource ids that the customer wishes to exclude from the restore point. If no disks are specified, all disks will be included.'})  # fmt: skip
    restore_point_instance_view: Optional[AzureRestorePointInstanceView] = field(default=None, metadata={'description': 'The instance view of a restore point.'})  # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'Gets the provisioning state of the restore point.'})  # fmt: skip
    source_metadata: Optional[AzureRestorePointSourceMetadata] = field(default=None, metadata={'description': 'Describes the properties of the virtual machine for which the restore point was created. The properties provided are a subset and the snapshot of the overall virtual machine properties captured at the time of the restore point creation.'})  # fmt: skip
    source_restore_point: Optional[str] = field(default=None, metadata={"description": "The api entity reference."})
    time_created: Optional[datetime] = field(default=None, metadata={'description': 'Gets the creation time of the restore point.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureRestorePointCollection(AzureResource):
    """Collected Azure restore point collection resource.

    ``api_spec`` lists the subscription-wide restorePointCollections via the
    compute API (2023-03-01); ``mapping`` turns each returned item into this
    resource. Per-field documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_restore_point_collection"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/restorePointCollections",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # The API does not expose create/modify/access times for this resource.
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "provisioning_state": S("properties", "provisioningState"),
        "restore_point_collection_id": S("properties", "restorePointCollectionId"),
        "restore_points": S("properties", "restorePoints") >> ForallBend(AzureRestorePoint.mapping),
        "source": S("properties", "source") >> Bend(AzureRestorePointCollectionSourceProperties.mapping),
    }
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The provisioning state of the restore point collection.'})  # fmt: skip
    restore_point_collection_id: Optional[str] = field(default=None, metadata={'description': 'The unique id of the restore point collection.'})  # fmt: skip
    restore_points: Optional[List[AzureRestorePoint]] = field(default=None, metadata={'description': 'A list containing all restore points created under this restore point collection.'})  # fmt: skip
    source: Optional[AzureRestorePointCollectionSourceProperties] = field(default=None, metadata={'description': 'The properties of the source resource that this restore point collection is created from.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureSnapshotSku:
    """SKU (name and tier) of an Azure snapshot."""

    kind: ClassVar[str] = "azure_snapshot_sku"
    mapping: ClassVar[Dict[str, Bender]] = {
        "name": S("name"),
        "tier": S("tier"),
    }
    name: Optional[str] = field(default=None, metadata={"description": "The sku name."})
    tier: Optional[str] = field(default=None, metadata={"description": "The sku tier."})
@define(eq=False, slots=False)
class AzureCopyCompletionError:
    """Error code and message for a failed background copy started via CopyStart."""

    kind: ClassVar[str] = "azure_copy_completion_error"
    mapping: ClassVar[Dict[str, Bender]] = {
        "error_code": S("errorCode"),
        "error_message": S("errorMessage"),
    }
    error_code: Optional[str] = field(
        default=None,
        metadata={
            "description": "Indicates the error code if the background copy of a resource created via the copystart operation fails."
        },
    )
    error_message: Optional[str] = field(
        default=None,
        metadata={
            "description": "Indicates the error message if the background copy of a resource created via the copystart operation fails."
        },
    )
@define(eq=False, slots=False)
class AzureSnapshot(AzureResource):
    """Collected Azure disk snapshot resource.

    ``api_spec`` lists all snapshots of the subscription via the compute API
    (2023-01-02); ``mapping`` turns each item into this resource. Per-field
    documentation lives in ``metadata['description']``.
    """

    kind: ClassVar[str] = "azure_snapshot"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-01-02",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # properties.timeCreated feeds both the base-resource ctime and the
        # snapshot-specific time_created field below.
        "ctime": S("properties", "timeCreated"),
        "mtime": K(None),
        "atime": K(None),
        "completion_percent": S("properties", "completionPercent"),
        "copy_completion_error": S("properties", "copyCompletionError") >> Bend(AzureCopyCompletionError.mapping),
        "creation_data": S("properties", "creationData") >> Bend(AzureCreationData.mapping),
        "data_access_auth_mode": S("properties", "dataAccessAuthMode"),
        "disk_access_id": S("properties", "diskAccessId"),
        "disk_size_bytes": S("properties", "diskSizeBytes"),
        "disk_size_gb": S("properties", "diskSizeGB"),
        "disk_state": S("properties", "diskState"),
        "snapshot_encryption": S("properties", "encryption") >> Bend(AzureEncryption.mapping),
        "encryption_settings_collection": S("properties", "encryptionSettingsCollection")
        >> Bend(AzureEncryptionSettingsCollection.mapping),
        "extended_location": S("extendedLocation") >> Bend(AzureExtendedLocation.mapping),
        "hyper_v_generation": S("properties", "hyperVGeneration"),
        "incremental": S("properties", "incremental"),
        "incremental_snapshot_family_id": S("properties", "incrementalSnapshotFamilyId"),
        "managed_by": S("managedBy"),
        "network_access_policy": S("properties", "networkAccessPolicy"),
        "os_type": S("properties", "osType"),
        "provisioning_state": S("properties", "provisioningState"),
        "public_network_access": S("properties", "publicNetworkAccess"),
        "purchase_plan": S("properties", "purchasePlan") >> Bend(AzurePurchasePlan.mapping),
        "snapshot_security_profile": S("properties", "securityProfile") >> Bend(AzureDiskSecurityProfile.mapping),
        "snapshot_sku": S("sku") >> Bend(AzureSnapshotSku.mapping),
        "supported_capabilities": S("properties", "supportedCapabilities") >> Bend(AzureSupportedCapabilities.mapping),
        "supports_hibernation": S("properties", "supportsHibernation"),
        "time_created": S("properties", "timeCreated"),
        "unique_id": S("properties", "uniqueId"),
    }
    completion_percent: Optional[float] = field(default=None, metadata={'description': 'Percentage complete for the background copy when a resource is created via the copystart operation.'})  # fmt: skip
    copy_completion_error: Optional[AzureCopyCompletionError] = field(default=None, metadata={'description': 'Indicates the error details if the background copy of a resource created via the copystart operation fails.'})  # fmt: skip
    creation_data: Optional[AzureCreationData] = field(default=None, metadata={'description': 'Data used when creating a disk.'})  # fmt: skip
    data_access_auth_mode: Optional[str] = field(default=None, metadata={'description': 'Additional authentication requirements when exporting or uploading to a disk or snapshot.'})  # fmt: skip
    disk_access_id: Optional[str] = field(default=None, metadata={'description': 'Arm id of the diskaccess resource for using private endpoints on disks.'})  # fmt: skip
    disk_size_bytes: Optional[int] = field(default=None, metadata={'description': 'The size of the disk in bytes. This field is read only.'})  # fmt: skip
    disk_size_gb: Optional[int] = field(default=None, metadata={'description': 'If creationdata. Createoption is empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running vm, and can only increase the disk s size.'})  # fmt: skip
    disk_state: Optional[str] = field(default=None, metadata={'description': 'This enumerates the possible state of the disk.'})  # fmt: skip
    snapshot_encryption: Optional[AzureEncryption] = field(default=None, metadata={'description': 'Encryption at rest settings for disk or snapshot.'})  # fmt: skip
    encryption_settings_collection: Optional[AzureEncryptionSettingsCollection] = field(default=None, metadata={'description': 'Encryption settings for disk or snapshot.'})  # fmt: skip
    extended_location: Optional[AzureExtendedLocation] = field(default=None, metadata={'description': 'The complex type of the extended location.'})  # fmt: skip
    hyper_v_generation: Optional[str] = field(default=None, metadata={'description': 'The hypervisor generation of the virtual machine. Applicable to os disks only.'})  # fmt: skip
    incremental: Optional[bool] = field(default=None, metadata={'description': 'Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed.'})  # fmt: skip
    incremental_snapshot_family_id: Optional[str] = field(default=None, metadata={'description': 'Incremental snapshots for a disk share an incremental snapshot family id. The get page range diff api can only be called on incremental snapshots with the same family id.'})  # fmt: skip
    managed_by: Optional[str] = field(default=None, metadata={"description": "Unused. Always null."})
    network_access_policy: Optional[str] = field(default=None, metadata={'description': 'Policy for accessing the disk via network.'})  # fmt: skip
    os_type: Optional[str] = field(default=None, metadata={"description": "The operating system type."})
    provisioning_state: Optional[str] = field(default=None, metadata={"description": "The disk provisioning state."})
    public_network_access: Optional[str] = field(default=None, metadata={'description': 'Policy for controlling export on the disk.'})  # fmt: skip
    purchase_plan: Optional[AzurePurchasePlan] = field(default=None, metadata={'description': 'Used for establishing the purchase context of any 3rd party artifact through marketplace.'})  # fmt: skip
    snapshot_security_profile: Optional[AzureDiskSecurityProfile] = field(default=None, metadata={'description': 'Contains the security related information for the resource.'})  # fmt: skip
    snapshot_sku: Optional[AzureSnapshotSku] = field(default=None, metadata={'description': 'The snapshots sku name. Can be standard_lrs, premium_lrs, or standard_zrs. This is an optional parameter for incremental snapshot and the default behavior is the sku will be set to the same sku as the previous snapshot.'})  # fmt: skip
    supported_capabilities: Optional[AzureSupportedCapabilities] = field(default=None, metadata={'description': 'List of supported capabilities persisted on the disk resource for vm use.'})  # fmt: skip
    supports_hibernation: Optional[bool] = field(default=None, metadata={'description': 'Indicates the os on a snapshot supports hibernation.'})  # fmt: skip
    time_created: Optional[datetime] = field(default=None, metadata={'description': 'The time when the snapshot was created.'})  # fmt: skip
    unique_id: Optional[str] = field(default=None, metadata={"description": "Unique guid identifying the resource."})
@define(eq=False, slots=False)
class AzureSshPublicKeyResource(AzureResource):
    """SSH public key stored in Azure compute, collected via the subscription-wide sshPublicKeys listing."""

    kind: ClassVar[str] = "azure_ssh_public_key_resource"
    # REST call that lists all SSH public keys of a subscription; the array of results sits under "value".
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/sshPublicKeys",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    # Bender mapping from the API json to this resource's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # The listing does not expose timestamps, so map the constant None via K(None).
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        # Only the publicKey string from the properties object is kept.
        "properties": S("properties", "publicKey"),
    }
    properties: Optional[str] = field(default=None, metadata={"description": "Properties of the ssh public key."})
@define(eq=False, slots=False)
class AzurePlan:
    """Marketplace purchase plan information (publisher/product/name) of an image or resource."""

    kind: ClassVar[str] = "azure_plan"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "name": S("name"),
        "product": S("product"),
        "promotion_code": S("promotionCode"),
        "publisher": S("publisher"),
    }
    name: Optional[str] = field(default=None, metadata={"description": "The plan id."})
    product: Optional[str] = field(
        default=None,
        metadata={
            "description": (
                "Specifies the product of the image from the marketplace. This is the same value as offer under"
                " the imagereference element."
            )
        },
    )
    promotion_code: Optional[str] = field(default=None, metadata={"description": "The promotion code."})
    publisher: Optional[str] = field(default=None, metadata={"description": "The publisher id."})
@define(eq=False, slots=False)
class AzureImageReference(AzureSubResource):
    """Reference to the image used to create a virtual machine: platform, marketplace, or gallery image."""

    kind: ClassVar[str] = "azure_image_reference"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "community_gallery_image_id": S("communityGalleryImageId"),
        "exact_version": S("exactVersion"),
        "offer": S("offer"),
        "publisher": S("publisher"),
        "shared_gallery_image_id": S("sharedGalleryImageId"),
        # Renamed from the API's "sku" to avoid clashing with other sku attributes in the model.
        "image_reference_sku": S("sku"),
        "version": S("version"),
    }
    community_gallery_image_id: Optional[str] = field(default=None, metadata={'description': 'Specified the community gallery image unique id for vm deployment. This can be fetched from community gallery image get call.'})  # fmt: skip
    exact_version: Optional[str] = field(default=None, metadata={'description': 'Specifies in decimal numbers, the version of platform image or marketplace image used to create the virtual machine. This readonly field differs from version , only if the value specified in version field is latest.'})  # fmt: skip
    offer: Optional[str] = field(default=None, metadata={'description': 'Specifies the offer of the platform image or marketplace image used to create the virtual machine.'})  # fmt: skip
    publisher: Optional[str] = field(default=None, metadata={"description": "The image publisher."})
    shared_gallery_image_id: Optional[str] = field(default=None, metadata={'description': 'Specified the shared gallery image unique id for vm deployment. This can be fetched from shared gallery image get call.'})  # fmt: skip
    image_reference_sku: Optional[str] = field(default=None, metadata={"description": "The image sku."})
    version: Optional[str] = field(default=None, metadata={'description': 'Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats are major. Minor. Build or latest. Major, minor, and build are decimal numbers. Specify latest to use the latest version of an image available at deploy time. Even if you use latest , the vm image will not automatically update after deploy time even if a new version becomes available. Please do not use field version for gallery image deployment, gallery image should always use id field for deployment, to use latest version of gallery image, just set /subscriptions/{subscriptionid}/resourcegroups/{resourcegroupname}/providers/microsoft. Compute/galleries/{galleryname}/images/{imagename} in the id field without version input.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureDiffDiskSettings:
    """Ephemeral (diff) OS disk settings: the ephemeral option and its placement."""

    kind: ClassVar[str] = "azure_diff_disk_settings"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "option": S("option"),
        "placement": S("placement"),
    }
    option: Optional[str] = field(
        default=None,
        metadata={"description": "Specifies the ephemeral disk option for operating system disk."},
    )
    placement: Optional[str] = field(
        default=None,
        metadata={
            "description": "Specifies the ephemeral disk placement for operating system disk. This property can be used by user in the request to choose the location i. E, cache disk or resource disk space for ephemeral os disk provisioning. For more information on ephemeral os disk size requirements, please refer ephemeral os disk size requirements for windows vm at https://docs. Microsoft. Com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and linux vm at https://docs. Microsoft. Com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements."
        },
    )
@define(eq=False, slots=False)
class AzureOSDisk:
    """Operating system disk of a virtual machine: source, size, caching, encryption and delete behavior."""

    kind: ClassVar[str] = "azure_os_disk"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "caching": S("caching"),
        "create_option": S("createOption"),
        "delete_option": S("deleteOption"),
        "diff_disk_settings": S("diffDiskSettings") >> Bend(AzureDiffDiskSettings.mapping),
        "disk_size_gb": S("diskSizeGB"),
        "encryption_settings": S("encryptionSettings") >> Bend(AzureDiskEncryptionSettings.mapping),
        # Only the uri of the image/vhd virtual hard disk objects is kept.
        "image": S("image", "uri"),
        "managed_disk": S("managedDisk") >> Bend(AzureManagedDiskParameters.mapping),
        "name": S("name"),
        "os_type": S("osType"),
        "vhd": S("vhd", "uri"),
        "write_accelerator_enabled": S("writeAcceleratorEnabled"),
    }
    caching: Optional[str] = field(default=None, metadata={'description': 'Specifies the caching requirements. Possible values are: **none,** **readonly,** **readwrite. ** the default values are: **none for standard storage. Readonly for premium storage**.'})  # fmt: skip
    create_option: Optional[str] = field(default=None, metadata={'description': 'Specifies how the virtual machine should be created. Possible values are: **attach. ** this value is used when you are using a specialized disk to create the virtual machine. **fromimage. ** this value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imagereference element described above. If you are using a marketplace image, you also use the plan element previously described.'})  # fmt: skip
    delete_option: Optional[str] = field(default=None, metadata={'description': 'Specifies the behavior of the managed disk when the vm gets deleted, for example whether the managed disk is deleted or detached. Supported values are: **delete. ** if this value is used, the managed disk is deleted when vm gets deleted. **detach. ** if this value is used, the managed disk is retained after vm gets deleted. Minimum api-version: 2021-03-01.'})  # fmt: skip
    diff_disk_settings: Optional[AzureDiffDiskSettings] = field(default=None, metadata={'description': 'Describes the parameters of ephemeral disk settings that can be specified for operating system disk. **note:** the ephemeral disk settings can only be specified for managed disk.'})  # fmt: skip
    disk_size_gb: Optional[int] = field(default=None, metadata={'description': 'Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. The property disksizegb is the number of bytes x 1024^3 for the disk and the value cannot be larger than 1023.'})  # fmt: skip
    encryption_settings: Optional[AzureDiskEncryptionSettings] = field(default=None, metadata={'description': 'Describes a encryption settings for a disk.'})  # fmt: skip
    image: Optional[str] = field(default=None, metadata={"description": "Describes the uri of a disk."})
    managed_disk: Optional[AzureManagedDiskParameters] = field(default=None, metadata={'description': 'The parameters of a managed disk.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The disk name."})
    os_type: Optional[str] = field(default=None, metadata={'description': 'This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd. Possible values are: **windows,** **linux. **.'})  # fmt: skip
    vhd: Optional[str] = field(default=None, metadata={"description": "Describes the uri of a disk."})
    write_accelerator_enabled: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether writeaccelerator should be enabled or disabled on the disk.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureDataDisk:
    """Data disk of a virtual machine: lun, source, size, throughput limits and attach/detach behavior."""

    kind: ClassVar[str] = "azure_data_disk"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "caching": S("caching"),
        "create_option": S("createOption"),
        "delete_option": S("deleteOption"),
        "detach_option": S("detachOption"),
        "disk_iops_read_write": S("diskIOPSReadWrite"),
        "disk_m_bps_read_write": S("diskMBpsReadWrite"),
        "disk_size_gb": S("diskSizeGB"),
        # Only the uri of the image/vhd virtual hard disk objects is kept.
        "image": S("image", "uri"),
        "lun": S("lun"),
        "managed_disk": S("managedDisk") >> Bend(AzureManagedDiskParameters.mapping),
        "name": S("name"),
        "to_be_detached": S("toBeDetached"),
        "vhd": S("vhd", "uri"),
        "write_accelerator_enabled": S("writeAcceleratorEnabled"),
    }
    caching: Optional[str] = field(default=None, metadata={'description': 'Specifies the caching requirements. Possible values are: **none,** **readonly,** **readwrite. ** the default values are: **none for standard storage. Readonly for premium storage**.'})  # fmt: skip
    create_option: Optional[str] = field(default=None, metadata={'description': 'Specifies how the virtual machine should be created. Possible values are: **attach. ** this value is used when you are using a specialized disk to create the virtual machine. **fromimage. ** this value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imagereference element described above. If you are using a marketplace image, you also use the plan element previously described.'})  # fmt: skip
    delete_option: Optional[str] = field(default=None, metadata={'description': 'Specifies the behavior of the managed disk when the vm gets deleted, for example whether the managed disk is deleted or detached. Supported values are: **delete. ** if this value is used, the managed disk is deleted when vm gets deleted. **detach. ** if this value is used, the managed disk is retained after vm gets deleted. Minimum api-version: 2021-03-01.'})  # fmt: skip
    detach_option: Optional[str] = field(default=None, metadata={'description': 'Specifies the detach behavior to be used while detaching a disk or which is already in the process of detachment from the virtual machine. Supported values are: **forcedetach. ** detachoption: **forcedetach** is applicable only for managed data disks. If a previous detachment attempt of the data disk did not complete due to an unexpected failure from the virtual machine and the disk is still not released then use force-detach as a last resort option to detach the disk forcibly from the vm. All writes might not have been flushed when using this detach behavior. **this feature is still in preview** mode and is not supported for virtualmachinescaleset. To force-detach a data disk update tobedetached to true along with setting detachoption: forcedetach.'})  # fmt: skip
    disk_iops_read_write: Optional[int] = field(default=None, metadata={'description': 'Specifies the read-write iops for the managed disk when storageaccounttype is ultrassd_lrs. Returned only for virtualmachine scaleset vm disks. Can be updated only via updates to the virtualmachine scale set.'})  # fmt: skip
    disk_m_bps_read_write: Optional[int] = field(default=None, metadata={'description': 'Specifies the bandwidth in mb per second for the managed disk when storageaccounttype is ultrassd_lrs. Returned only for virtualmachine scaleset vm disks. Can be updated only via updates to the virtualmachine scale set.'})  # fmt: skip
    disk_size_gb: Optional[int] = field(default=None, metadata={'description': 'Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. The property disksizegb is the number of bytes x 1024^3 for the disk and the value cannot be larger than 1023.'})  # fmt: skip
    image: Optional[str] = field(default=None, metadata={"description": "Describes the uri of a disk."})
    lun: Optional[int] = field(default=None, metadata={'description': 'Specifies the logical unit number of the data disk. This value is used to identify data disks within the vm and therefore must be unique for each data disk attached to a vm.'})  # fmt: skip
    managed_disk: Optional[AzureManagedDiskParameters] = field(default=None, metadata={'description': 'The parameters of a managed disk.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The disk name."})
    to_be_detached: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether the data disk is in process of detachment from the virtualmachine/virtualmachinescaleset.'})  # fmt: skip
    vhd: Optional[str] = field(default=None, metadata={"description": "Describes the uri of a disk."})
    write_accelerator_enabled: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether writeaccelerator should be enabled or disabled on the disk.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureStorageProfile:
    """Storage settings of a virtual machine: image reference, OS disk and attached data disks."""

    kind: ClassVar[str] = "azure_storage_profile"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "data_disks": S("dataDisks") >> ForallBend(AzureDataDisk.mapping),
        "disk_controller_type": S("diskControllerType"),
        "image_reference": S("imageReference") >> Bend(AzureImageReference.mapping),
        "os_disk": S("osDisk") >> Bend(AzureOSDisk.mapping),
    }
    data_disks: Optional[List[AzureDataDisk]] = field(default=None, metadata={'description': 'Specifies the parameters that are used to add a data disk to a virtual machine. For more information about disks, see [about disks and vhds for azure virtual machines](https://docs. Microsoft. Com/azure/virtual-machines/managed-disks-overview).'})  # fmt: skip
    disk_controller_type: Optional[str] = field(default=None, metadata={'description': 'Specifies the disk controller type configured for the vm and virtualmachinescaleset. This property is only supported for virtual machines whose operating system disk and vm sku supports generation 2 (https://docs. Microsoft. Com/en-us/azure/virtual-machines/generation-2), please check the hypervgenerations capability returned as part of vm sku capabilities in the response of microsoft. Compute skus api for the region contains v2 (https://docs. Microsoft. Com/rest/api/compute/resourceskus/list). For more information about disk controller types supported please refer to https://aka. Ms/azure-diskcontrollertypes.'})  # fmt: skip
    image_reference: Optional[AzureImageReference] = field(default=None, metadata={'description': 'Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. Note: image reference publisher and offer can only be set when you create the scale set.'})  # fmt: skip
    os_disk: Optional[AzureOSDisk] = field(default=None, metadata={'description': 'Specifies information about the operating system disk used by the virtual machine. For more information about disks, see [about disks and vhds for azure virtual machines](https://docs. Microsoft. Com/azure/virtual-machines/managed-disks-overview).'})  # fmt: skip
@define(eq=False, slots=False)
class AzureAdditionalCapabilities:
    """Optional capability flags of a virtual machine: hibernation and UltraSSD support."""

    kind: ClassVar[str] = "azure_additional_capabilities"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "hibernation_enabled": S("hibernationEnabled"),
        "ultra_ssd_enabled": S("ultraSSDEnabled"),
    }
    hibernation_enabled: Optional[bool] = field(
        default=None,
        metadata={"description": "The flag that enables or disables hibernation capability on the vm."},
    )
    ultra_ssd_enabled: Optional[bool] = field(
        default=None,
        metadata={
            "description": (
                "The flag that enables or disables a capability to have one or more managed data disks with"
                " ultrassd_lrs storage account type on the vm or vmss. Managed disks with storage account type"
                " ultrassd_lrs can be added to a virtual machine or virtual machine scale set only if this"
                " property is enabled."
            )
        },
    )
@define(eq=False, slots=False)
class AzureNetworkInterfaceReference(AzureSubResource):
    """Reference to a network interface attached to a virtual machine."""

    kind: ClassVar[str] = "azure_network_interface_reference"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "delete_option": S("properties", "deleteOption"),
        "primary": S("properties", "primary"),
    }
    delete_option: Optional[str] = field(
        default=None,
        metadata={"description": "Specify what happens to the network interface when the vm is deleted."},
    )
    primary: Optional[bool] = field(
        default=None,
        metadata={
            "description": (
                "Specifies the primary network interface in case the virtual machine has more than 1 network"
                " interface."
            )
        },
    )
@define(eq=False, slots=False)
class AzureVirtualMachineNetworkInterfaceDnsSettingsConfiguration:
    """DNS settings of a virtual machine network interface configuration."""

    kind: ClassVar[str] = "azure_virtual_machine_network_interface_dns_settings_configuration"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "dns_servers": S("dnsServers"),
    }
    dns_servers: Optional[List[str]] = field(default=None, metadata={"description": "List of dns servers ip addresses."})
@define(eq=False, slots=False)
class AzureVirtualMachineIpTag:
    """A single ip tag (type and value) associated with a public IP address."""

    kind: ClassVar[str] = "azure_virtual_machine_ip_tag"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "ip_tag_type": S("ipTagType"),
        "tag": S("tag"),
    }
    ip_tag_type: Optional[str] = field(
        default=None,
        metadata={"description": "Ip tag type. Example: firstpartyusage."},
    )
    tag: Optional[str] = field(
        default=None,
        metadata={"description": "Ip tag associated with the public ip. Example: sql, storage etc."},
    )
@define(eq=False, slots=False)
class AzurePublicIPAddressSku:
    """SKU (name and tier) of a public IP address."""

    kind: ClassVar[str] = "azure_public_ip_address_sku"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "name": S("name"),
        "tier": S("tier"),
    }
    name: Optional[str] = field(default=None, metadata={"description": "Specify public ip sku name."})
    tier: Optional[str] = field(default=None, metadata={"description": "Specify public ip sku tier."})
@define(eq=False, slots=False)
class AzureVirtualMachinePublicIPAddressConfiguration:
    """Public IP address configuration of a virtual machine ip configuration."""

    kind: ClassVar[str] = "azure_virtual_machine_public_ip_address_configuration"
    # Bender mapping from the API json to this type's attributes; most values live under "properties".
    mapping: ClassVar[Dict[str, Bender]] = {
        "delete_option": S("properties", "deleteOption"),
        # Only the domainNameLabel string of the dns settings object is kept.
        "dns_settings": S("properties", "dnsSettings", "domainNameLabel"),
        "idle_timeout_in_minutes": S("properties", "idleTimeoutInMinutes"),
        "ip_tags": S("properties", "ipTags") >> ForallBend(AzureVirtualMachineIpTag.mapping),
        "name": S("name"),
        "public_ip_address_version": S("properties", "publicIPAddressVersion"),
        "public_ip_allocation_method": S("properties", "publicIPAllocationMethod"),
        "public_ip_prefix": S("properties", "publicIPPrefix", "id"),
        "sku": S("sku") >> Bend(AzurePublicIPAddressSku.mapping),
    }
    delete_option: Optional[str] = field(default=None, metadata={'description': 'Specify what happens to the public ip address when the vm is deleted.'})  # fmt: skip
    dns_settings: Optional[str] = field(default=None, metadata={'description': 'Describes a virtual machines network configuration s dns settings.'})  # fmt: skip
    idle_timeout_in_minutes: Optional[int] = field(default=None, metadata={'description': 'The idle timeout of the public ip address.'})  # fmt: skip
    ip_tags: Optional[List[AzureVirtualMachineIpTag]] = field(default=None, metadata={'description': 'The list of ip tags associated with the public ip address.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The publicip address configuration name."})
    public_ip_address_version: Optional[str] = field(default=None, metadata={'description': 'Available from api-version 2019-07-01 onwards, it represents whether the specific ipconfiguration is ipv4 or ipv6. Default is taken as ipv4. Possible values are: ipv4 and ipv6.'})  # fmt: skip
    public_ip_allocation_method: Optional[str] = field(default=None, metadata={'description': 'Specify the public ip allocation type.'})  # fmt: skip
    public_ip_prefix: Optional[str] = field(default=None, metadata={"description": ""})
    sku: Optional[AzurePublicIPAddressSku] = field(default=None, metadata={'description': 'Describes the public ip sku. It can only be set with orchestrationmode as flexible.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineNetworkInterfaceIPConfiguration:
    """IP configuration of a virtual machine network interface: subnet, pools and public IP settings."""

    kind: ClassVar[str] = "azure_virtual_machine_network_interface_ip_configuration"
    # Bender mapping from the API json to this type's attributes; the backend pool,
    # security group, subnet and prefix references are reduced to their resource ids.
    mapping: ClassVar[Dict[str, Bender]] = {
        "application_gateway_backend_address_pools": S("properties")
        >> S("applicationGatewayBackendAddressPools", default=[])
        >> ForallBend(S("id")),
        "application_security_groups": S("properties")
        >> S("applicationSecurityGroups", default=[])
        >> ForallBend(S("id")),
        "load_balancer_backend_address_pools": S("properties")
        >> S("loadBalancerBackendAddressPools", default=[])
        >> ForallBend(S("id")),
        "name": S("name"),
        "primary": S("properties", "primary"),
        "private_ip_address_version": S("properties", "privateIPAddressVersion"),
        "public_ip_address_configuration": S("properties", "publicIPAddressConfiguration")
        >> Bend(AzureVirtualMachinePublicIPAddressConfiguration.mapping),
        "subnet": S("properties", "subnet", "id"),
    }
    application_gateway_backend_address_pools: Optional[List[str]] = field(default=None, metadata={'description': 'Specifies an array of references to backend address pools of application gateways. A virtual machine can reference backend address pools of multiple application gateways. Multiple virtual machines cannot use the same application gateway.'})  # fmt: skip
    application_security_groups: Optional[List[str]] = field(default=None, metadata={'description': 'Specifies an array of references to application security group.'})  # fmt: skip
    load_balancer_backend_address_pools: Optional[List[str]] = field(default=None, metadata={'description': 'Specifies an array of references to backend address pools of load balancers. A virtual machine can reference backend address pools of one public and one internal load balancer. [multiple virtual machines cannot use the same basic sku load balancer].'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The ip configuration name."})
    primary: Optional[bool] = field(default=None, metadata={'description': 'Specifies the primary network interface in case the virtual machine has more than 1 network interface.'})  # fmt: skip
    private_ip_address_version: Optional[str] = field(default=None, metadata={'description': 'Available from api-version 2017-03-30 onwards, it represents whether the specific ipconfiguration is ipv4 or ipv6. Default is taken as ipv4. Possible values are: ipv4 and ipv6.'})  # fmt: skip
    public_ip_address_configuration: Optional[AzureVirtualMachinePublicIPAddressConfiguration] = field(default=None, metadata={'description': 'Describes a virtual machines ip configuration s publicipaddress configuration.'})  # fmt: skip
    subnet: Optional[str] = field(default=None, metadata={"description": ""})
@define(eq=False, slots=False)
class AzureVirtualMachineNetworkInterfaceConfiguration:
    """Configuration of a virtual machine network interface: dns, ip configurations and feature flags."""

    kind: ClassVar[str] = "azure_virtual_machine_network_interface_configuration"
    # Bender mapping from the API json to this type's attributes; most values live under "properties",
    # and referenced resources (dscp configuration, network security group) are reduced to their ids.
    mapping: ClassVar[Dict[str, Bender]] = {
        "delete_option": S("properties", "deleteOption"),
        "disable_tcp_state_tracking": S("properties", "disableTcpStateTracking"),
        "dns_settings": S("properties", "dnsSettings")
        >> Bend(AzureVirtualMachineNetworkInterfaceDnsSettingsConfiguration.mapping),
        "dscp_configuration": S("properties", "dscpConfiguration", "id"),
        "enable_accelerated_networking": S("properties", "enableAcceleratedNetworking"),
        "enable_fpga": S("properties", "enableFpga"),
        "enable_ip_forwarding": S("properties", "enableIPForwarding"),
        "ip_configurations": S("properties", "ipConfigurations")
        >> ForallBend(AzureVirtualMachineNetworkInterfaceIPConfiguration.mapping),
        "name": S("name"),
        "network_security_group": S("properties", "networkSecurityGroup", "id"),
        "primary": S("properties", "primary"),
    }
    delete_option: Optional[str] = field(default=None, metadata={'description': 'Specify what happens to the network interface when the vm is deleted.'})  # fmt: skip
    disable_tcp_state_tracking: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether the network interface is disabled for tcp state tracking.'})  # fmt: skip
    dns_settings: Optional[AzureVirtualMachineNetworkInterfaceDnsSettingsConfiguration] = field(default=None, metadata={'description': 'Describes a virtual machines network configuration s dns settings.'})  # fmt: skip
    dscp_configuration: Optional[str] = field(default=None, metadata={"description": ""})
    enable_accelerated_networking: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether the network interface is accelerated networking-enabled.'})  # fmt: skip
    enable_fpga: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether the network interface is fpga networking-enabled.'})  # fmt: skip
    enable_ip_forwarding: Optional[bool] = field(default=None, metadata={'description': 'Whether ip forwarding enabled on this nic.'})  # fmt: skip
    ip_configurations: Optional[List[AzureVirtualMachineNetworkInterfaceIPConfiguration]] = field(default=None, metadata={'description': 'Specifies the ip configurations of the network interface.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The network interface configuration name."})
    network_security_group: Optional[str] = field(default=None, metadata={"description": ""})
    primary: Optional[bool] = field(default=None, metadata={'description': 'Specifies the primary network interface in case the virtual machine has more than 1 network interface.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureNetworkProfile:
    """Network settings of a virtual machine: interface references and/or interface configurations."""

    kind: ClassVar[str] = "azure_network_profile"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "network_api_version": S("networkApiVersion"),
        "network_interface_configurations": S("networkInterfaceConfigurations")
        >> ForallBend(AzureVirtualMachineNetworkInterfaceConfiguration.mapping),
        "network_interfaces": S("networkInterfaces") >> ForallBend(AzureNetworkInterfaceReference.mapping),
    }
    network_api_version: Optional[str] = field(default=None, metadata={'description': 'Specifies the microsoft. Network api version used when creating networking resources in the network interface configurations.'})  # fmt: skip
    network_interface_configurations: Optional[List[AzureVirtualMachineNetworkInterfaceConfiguration]] = field(default=None, metadata={'description': 'Specifies the networking configurations that will be used to create the virtual machine networking resources.'})  # fmt: skip
    network_interfaces: Optional[List[AzureNetworkInterfaceReference]] = field(default=None, metadata={'description': 'Specifies the list of resource ids for the network interfaces associated with the virtual machine.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineExtensionHandlerInstanceView:
    """Instance view of a single virtual machine extension handler."""

    kind: ClassVar[str] = "azure_virtual_machine_extension_handler_instance_view"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "status": S("status") >> Bend(AzureInstanceViewStatus.mapping),
        "type": S("type"),
        "type_handler_version": S("typeHandlerVersion"),
    }
    status: Optional[AzureInstanceViewStatus] = field(default=None, metadata={"description": "Instance view status."})
    type: Optional[str] = field(
        default=None,
        metadata={"description": "Specifies the type of the extension; an example is customscriptextension."},
    )
    type_handler_version: Optional[str] = field(
        default=None,
        metadata={"description": "Specifies the version of the script handler."},
    )
@define(eq=False, slots=False)
class AzureVirtualMachineAgentInstanceView:
    """Instance view of the virtual machine agent: version, statuses and extension handler views."""

    kind: ClassVar[str] = "azure_virtual_machine_agent_instance_view"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "extension_handlers": S("extensionHandlers")
        >> ForallBend(AzureVirtualMachineExtensionHandlerInstanceView.mapping),
        "statuses": S("statuses") >> ForallBend(AzureInstanceViewStatus.mapping),
        "vm_agent_version": S("vmAgentVersion"),
    }
    extension_handlers: Optional[List[AzureVirtualMachineExtensionHandlerInstanceView]] = field(default=None, metadata={'description': 'The virtual machine extension handler instance view.'})  # fmt: skip
    statuses: Optional[List[AzureInstanceViewStatus]] = field(default=None, metadata={'description': 'The resource status information.'})  # fmt: skip
    vm_agent_version: Optional[str] = field(default=None, metadata={"description": "The vm agent full version."})
@define(eq=False, slots=False)
class AzureMaintenanceRedeployStatus:
    """Maintenance operation status of a virtual machine, including (pre-)maintenance window times."""

    kind: ClassVar[str] = "azure_maintenance_redeploy_status"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "is_customer_initiated_maintenance_allowed": S("isCustomerInitiatedMaintenanceAllowed"),
        "last_operation_message": S("lastOperationMessage"),
        "last_operation_result_code": S("lastOperationResultCode"),
        "maintenance_window_end_time": S("maintenanceWindowEndTime"),
        "maintenance_window_start_time": S("maintenanceWindowStartTime"),
        "pre_maintenance_window_end_time": S("preMaintenanceWindowEndTime"),
        "pre_maintenance_window_start_time": S("preMaintenanceWindowStartTime"),
    }
    is_customer_initiated_maintenance_allowed: Optional[bool] = field(default=None, metadata={'description': 'True, if customer is allowed to perform maintenance.'})  # fmt: skip
    last_operation_message: Optional[str] = field(default=None, metadata={'description': 'Message returned for the last maintenance operation.'})  # fmt: skip
    last_operation_result_code: Optional[str] = field(default=None, metadata={'description': 'The last maintenance operation result code.'})  # fmt: skip
    maintenance_window_end_time: Optional[datetime] = field(default=None, metadata={'description': 'End time for the maintenance window.'})  # fmt: skip
    maintenance_window_start_time: Optional[datetime] = field(default=None, metadata={'description': 'Start time for the maintenance window.'})  # fmt: skip
    pre_maintenance_window_end_time: Optional[datetime] = field(default=None, metadata={'description': 'End time for the pre maintenance window.'})  # fmt: skip
    pre_maintenance_window_start_time: Optional[datetime] = field(default=None, metadata={'description': 'Start time for the pre maintenance window.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureDiskInstanceView:
    """Instance view of a disk: name, statuses and encryption settings."""

    kind: ClassVar[str] = "azure_disk_instance_view"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "encryption_settings": S("encryptionSettings") >> ForallBend(AzureDiskEncryptionSettings.mapping),
        "name": S("name"),
        "statuses": S("statuses") >> ForallBend(AzureInstanceViewStatus.mapping),
    }
    encryption_settings: Optional[List[AzureDiskEncryptionSettings]] = field(
        default=None,
        metadata={"description": "Specifies the encryption settings for the os disk. Minimum api-version: 2015-06-15."},
    )
    name: Optional[str] = field(default=None, metadata={"description": "The disk name."})
    statuses: Optional[List[AzureInstanceViewStatus]] = field(
        default=None,
        metadata={"description": "The resource status information."},
    )
@define(eq=False, slots=False)
class AzureVirtualMachineExtensionInstanceView:
    """Instance view of a virtual machine extension: name, type, version and status lists."""

    kind: ClassVar[str] = "azure_virtual_machine_extension_instance_view"
    # Bender mapping from the API json to this type's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "name": S("name"),
        "statuses": S("statuses") >> ForallBend(AzureInstanceViewStatus.mapping),
        "substatuses": S("substatuses") >> ForallBend(AzureInstanceViewStatus.mapping),
        "type": S("type"),
        "type_handler_version": S("typeHandlerVersion"),
    }
    name: Optional[str] = field(default=None, metadata={"description": "The virtual machine extension name."})
    statuses: Optional[List[AzureInstanceViewStatus]] = field(default=None, metadata={'description': 'The resource status information.'})  # fmt: skip
    substatuses: Optional[List[AzureInstanceViewStatus]] = field(default=None, metadata={'description': 'The resource status information.'})  # fmt: skip
    type: Optional[str] = field(default=None, metadata={'description': 'Specifies the type of the extension; an example is customscriptextension.'})  # fmt: skip
    type_handler_version: Optional[str] = field(default=None, metadata={'description': 'Specifies the version of the script handler.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineHealthStatus:
    """Health status of a virtual machine as reported in its instance view."""

    kind: ClassVar[str] = "azure_virtual_machine_health_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "status": S("status") >> Bend(AzureInstanceViewStatus.mapping),
    }
    # Single instance-view status entry carrying the health signal.
    status: Optional[AzureInstanceViewStatus] = field(
        default=None,
        metadata={"description": "Instance view status."},
    )
@define(eq=False, slots=False)
class AzureBootDiagnosticsInstanceView:
    """Instance view of VM boot diagnostics: console screenshot / serial log blob URIs and a status entry.

    The blob URIs are only populated when boot diagnostics uses a customer storage account
    (not managed storage), per the Azure API description below.
    """

    kind: ClassVar[str] = "azure_boot_diagnostics_instance_view"
    mapping: ClassVar[Dict[str, Bender]] = {
        "console_screenshot_blob_uri": S("consoleScreenshotBlobUri"),
        "serial_console_log_blob_uri": S("serialConsoleLogBlobUri"),
        "status": S("status") >> Bend(AzureInstanceViewStatus.mapping),
    }
    console_screenshot_blob_uri: Optional[str] = field(default=None, metadata={'description': 'The console screenshot blob uri. **note:** this will **not** be set if boot diagnostics is currently enabled with managed storage.'})  # fmt: skip
    serial_console_log_blob_uri: Optional[str] = field(default=None, metadata={'description': 'The serial console log blob uri. **note:** this will **not** be set if boot diagnostics is currently enabled with managed storage.'})  # fmt: skip
    status: Optional[AzureInstanceViewStatus] = field(default=None, metadata={"description": "Instance view status."})
@define(eq=False, slots=False)
class AzureAvailablePatchSummary:
    """Summary of the most recent patch *assessment* on a VM: counts of available patches, timing and status."""

    kind: ClassVar[str] = "azure_available_patch_summary"
    mapping: ClassVar[Dict[str, Bender]] = {
        "assessment_activity_id": S("assessmentActivityId"),
        "critical_and_security_patch_count": S("criticalAndSecurityPatchCount"),
        "error": S("error") >> Bend(AzureApiError.mapping),
        "last_modified_time": S("lastModifiedTime"),
        "other_patch_count": S("otherPatchCount"),
        "reboot_pending": S("rebootPending"),
        "start_time": S("startTime"),
        "status": S("status"),
    }
    assessment_activity_id: Optional[str] = field(default=None, metadata={'description': 'The activity id of the operation that produced this result. It is used to correlate across crp and extension logs.'})  # fmt: skip
    critical_and_security_patch_count: Optional[int] = field(default=None, metadata={'description': 'The number of critical or security patches that have been detected as available and not yet installed.'})  # fmt: skip
    error: Optional[AzureApiError] = field(default=None, metadata={"description": "Api error."})
    last_modified_time: Optional[datetime] = field(default=None, metadata={'description': 'The utc timestamp when the operation began.'})  # fmt: skip
    other_patch_count: Optional[int] = field(default=None, metadata={'description': 'The number of all available patches excluding critical and security.'})  # fmt: skip
    reboot_pending: Optional[bool] = field(default=None, metadata={'description': 'The overall reboot status of the vm. It will be true when partially installed patches require a reboot to complete installation but the reboot has not yet occurred.'})  # fmt: skip
    start_time: Optional[datetime] = field(default=None, metadata={'description': 'The utc timestamp when the operation began.'})  # fmt: skip
    status: Optional[str] = field(default=None, metadata={'description': 'The overall success or failure status of the operation. It remains inprogress until the operation completes. At that point it will become unknown , failed , succeeded , or completedwithwarnings.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureLastPatchInstallationSummary:
    """Summary of the most recent patch *installation* run on a VM: per-outcome patch counts, timing and status."""

    kind: ClassVar[str] = "azure_last_patch_installation_summary"
    mapping: ClassVar[Dict[str, Bender]] = {
        "error": S("error") >> Bend(AzureApiError.mapping),
        "excluded_patch_count": S("excludedPatchCount"),
        "failed_patch_count": S("failedPatchCount"),
        "installation_activity_id": S("installationActivityId"),
        "installed_patch_count": S("installedPatchCount"),
        "last_modified_time": S("lastModifiedTime"),
        "maintenance_window_exceeded": S("maintenanceWindowExceeded"),
        "not_selected_patch_count": S("notSelectedPatchCount"),
        "pending_patch_count": S("pendingPatchCount"),
        "start_time": S("startTime"),
        "status": S("status"),
    }
    error: Optional[AzureApiError] = field(default=None, metadata={"description": "Api error."})
    excluded_patch_count: Optional[int] = field(default=None, metadata={'description': 'The number of all available patches but excluded explicitly by a customer-specified exclusion list match.'})  # fmt: skip
    failed_patch_count: Optional[int] = field(default=None, metadata={'description': 'The count of patches that failed installation.'})  # fmt: skip
    installation_activity_id: Optional[str] = field(default=None, metadata={'description': 'The activity id of the operation that produced this result. It is used to correlate across crp and extension logs.'})  # fmt: skip
    installed_patch_count: Optional[int] = field(default=None, metadata={'description': 'The count of patches that successfully installed.'})  # fmt: skip
    last_modified_time: Optional[datetime] = field(default=None, metadata={'description': 'The utc timestamp when the operation began.'})  # fmt: skip
    maintenance_window_exceeded: Optional[bool] = field(default=None, metadata={'description': 'Describes whether the operation ran out of time before it completed all its intended actions.'})  # fmt: skip
    not_selected_patch_count: Optional[int] = field(default=None, metadata={'description': 'The number of all available patches but not going to be installed because it didn t match a classification or inclusion list entry.'})  # fmt: skip
    pending_patch_count: Optional[int] = field(default=None, metadata={'description': 'The number of all available patches expected to be installed over the course of the patch installation operation.'})  # fmt: skip
    start_time: Optional[datetime] = field(default=None, metadata={'description': 'The utc timestamp when the operation began.'})  # fmt: skip
    status: Optional[str] = field(default=None, metadata={'description': 'The overall success or failure status of the operation. It remains inprogress until the operation completes. At that point it will become unknown , failed , succeeded , or completedwithwarnings.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachinePatchStatus:
    """Aggregate patch state of a VM: last assessment summary, last installation summary and patch-mode statuses."""

    kind: ClassVar[str] = "azure_virtual_machine_patch_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "available_patch_summary": S("availablePatchSummary") >> Bend(AzureAvailablePatchSummary.mapping),
        "configuration_statuses": S("configurationStatuses") >> ForallBend(AzureInstanceViewStatus.mapping),
        "last_patch_installation_summary": S("lastPatchInstallationSummary")
        >> Bend(AzureLastPatchInstallationSummary.mapping),
    }
    available_patch_summary: Optional[AzureAvailablePatchSummary] = field(default=None, metadata={'description': 'Describes the properties of an virtual machine instance view for available patch summary.'})  # fmt: skip
    configuration_statuses: Optional[List[AzureInstanceViewStatus]] = field(default=None, metadata={'description': 'The enablement status of the specified patchmode.'})  # fmt: skip
    last_patch_installation_summary: Optional[AzureLastPatchInstallationSummary] = field(default=None, metadata={'description': 'Describes the properties of the last installed patch summary.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineInstanceView:
    """Runtime instance view of a VM: OS info, disks, extensions, boot diagnostics, patch state and health.

    Mirrors the Azure REST `properties.instanceView` object; each nested structure is bent into its
    corresponding Azure* model class.
    """

    kind: ClassVar[str] = "azure_virtual_machine_instance_view"
    mapping: ClassVar[Dict[str, Bender]] = {
        "assigned_host": S("assignedHost"),
        "boot_diagnostics": S("bootDiagnostics") >> Bend(AzureBootDiagnosticsInstanceView.mapping),
        "computer_name": S("computerName"),
        "disks": S("disks") >> ForallBend(AzureDiskInstanceView.mapping),
        "extensions": S("extensions") >> ForallBend(AzureVirtualMachineExtensionInstanceView.mapping),
        "hyper_v_generation": S("hyperVGeneration"),
        "maintenance_redeploy_status": S("maintenanceRedeployStatus") >> Bend(AzureMaintenanceRedeployStatus.mapping),
        "os_name": S("osName"),
        "os_version": S("osVersion"),
        "patch_status": S("patchStatus") >> Bend(AzureVirtualMachinePatchStatus.mapping),
        "platform_fault_domain": S("platformFaultDomain"),
        "platform_update_domain": S("platformUpdateDomain"),
        "rdp_thumb_print": S("rdpThumbPrint"),
        "statuses": S("statuses") >> ForallBend(AzureInstanceViewStatus.mapping),
        "vm_agent": S("vmAgent") >> Bend(AzureVirtualMachineAgentInstanceView.mapping),
        "vm_health": S("vmHealth") >> Bend(AzureVirtualMachineHealthStatus.mapping),
    }
    assigned_host: Optional[str] = field(default=None, metadata={'description': 'Resource id of the dedicated host, on which the virtual machine is allocated through automatic placement, when the virtual machine is associated with a dedicated host group that has automatic placement enabled. Minimum api-version: 2020-06-01.'})  # fmt: skip
    boot_diagnostics: Optional[AzureBootDiagnosticsInstanceView] = field(default=None, metadata={'description': 'The instance view of a virtual machine boot diagnostics.'})  # fmt: skip
    computer_name: Optional[str] = field(default=None, metadata={'description': 'The computer name assigned to the virtual machine.'})  # fmt: skip
    disks: Optional[List[AzureDiskInstanceView]] = field(default=None, metadata={'description': 'The virtual machine disk information.'})  # fmt: skip
    extensions: Optional[List[AzureVirtualMachineExtensionInstanceView]] = field(default=None, metadata={'description': 'The extensions information.'})  # fmt: skip
    hyper_v_generation: Optional[str] = field(default=None, metadata={'description': 'Specifies the hypervgeneration type associated with a resource.'})  # fmt: skip
    maintenance_redeploy_status: Optional[AzureMaintenanceRedeployStatus] = field(default=None, metadata={'description': 'Maintenance operation status.'})  # fmt: skip
    os_name: Optional[str] = field(default=None, metadata={'description': 'The operating system running on the virtual machine.'})  # fmt: skip
    os_version: Optional[str] = field(default=None, metadata={'description': 'The version of operating system running on the virtual machine.'})  # fmt: skip
    patch_status: Optional[AzureVirtualMachinePatchStatus] = field(default=None, metadata={'description': 'The status of virtual machine patch operations.'})  # fmt: skip
    platform_fault_domain: Optional[int] = field(default=None, metadata={'description': 'Specifies the fault domain of the virtual machine.'})  # fmt: skip
    platform_update_domain: Optional[int] = field(default=None, metadata={'description': 'Specifies the update domain of the virtual machine.'})  # fmt: skip
    rdp_thumb_print: Optional[str] = field(default=None, metadata={'description': 'The remote desktop certificate thumbprint.'})  # fmt: skip
    statuses: Optional[List[AzureInstanceViewStatus]] = field(default=None, metadata={'description': 'The resource status information.'})  # fmt: skip
    vm_agent: Optional[AzureVirtualMachineAgentInstanceView] = field(default=None, metadata={'description': 'The instance view of the vm agent running on the virtual machine.'})  # fmt: skip
    vm_health: Optional[AzureVirtualMachineHealthStatus] = field(default=None, metadata={'description': 'The health status of the vm.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureTerminateNotificationProfile:
    """Configuration of the terminate scheduled-event notification for a VM."""

    kind: ClassVar[str] = "azure_terminate_notification_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "enable": S("enable"),
        "not_before_timeout": S("notBeforeTimeout"),
    }
    # Whether the terminate scheduled event is turned on.
    enable: Optional[bool] = field(
        default=None,
        metadata={"description": "Specifies whether the terminate scheduled event is enabled or disabled."},
    )
    # ISO 8601 delay before the terminate event is auto-approved.
    not_before_timeout: Optional[str] = field(
        default=None,
        metadata={
            "description": "Configurable length of time a virtual machine being deleted will have to potentially approve the terminate scheduled event before the event is auto approved (timed out). The configuration must be specified in iso 8601 format, the default value is 5 minutes (pt5m)."
        },
    )
@define(eq=False, slots=False)
class AzureOSImageNotificationProfile:
    """Configuration of the OS-image scheduled-event notification for a VM."""

    kind: ClassVar[str] = "azure_os_image_notification_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "enable": S("enable"),
        "not_before_timeout": S("notBeforeTimeout"),
    }
    # Whether the OS image scheduled event is turned on.
    enable: Optional[bool] = field(
        default=None,
        metadata={"description": "Specifies whether the os image scheduled event is enabled or disabled."},
    )
    # ISO 8601 delay before the OS image event is auto-approved (must be PT15M).
    not_before_timeout: Optional[str] = field(
        default=None,
        metadata={
            "description": "Length of time a virtual machine being reimaged or having its os upgraded will have to potentially approve the os image scheduled event before the event is auto approved (timed out). The configuration is specified in iso 8601 format, and the value must be 15 minutes (pt15m)."
        },
    )
@define(eq=False, slots=False)
class AzureScheduledEventsProfile:
    """Groups the OS-image and terminate scheduled-event notification profiles of a VM."""

    kind: ClassVar[str] = "azure_scheduled_events_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "os_image_notification_profile": (
            S("osImageNotificationProfile") >> Bend(AzureOSImageNotificationProfile.mapping)
        ),
        "terminate_notification_profile": (
            S("terminateNotificationProfile") >> Bend(AzureTerminateNotificationProfile.mapping)
        ),
    }
    # Upstream schema provides no description for either profile.
    os_image_notification_profile: Optional[AzureOSImageNotificationProfile] = field(
        default=None, metadata={"description": ""}
    )
    terminate_notification_profile: Optional[AzureTerminateNotificationProfile] = field(
        default=None, metadata={"description": ""}
    )
@define(eq=False, slots=False)
class AzureCapacityReservationProfile:
    """Capacity-reservation settings of a VM: the id of the capacity reservation group used for allocation."""

    kind: ClassVar[str] = "azure_capacity_reservation_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        # Flatten the nested object down to its resource id.
        "capacity_reservation_group": S("capacityReservationGroup", "id"),
    }
    capacity_reservation_group: Optional[str] = field(
        default=None, metadata={"description": ""}
    )
@define(eq=False, slots=False)
class AzureVMGalleryApplication:
    """A gallery application (VM application package) assigned to a VM/VMSS: package reference, ordering and options."""

    kind: ClassVar[str] = "azure_vm_gallery_application"
    mapping: ClassVar[Dict[str, Bender]] = {
        "configuration_reference": S("configurationReference"),
        "enable_automatic_upgrade": S("enableAutomaticUpgrade"),
        "order": S("order"),
        "package_reference_id": S("packageReferenceId"),
        "tags": S("tags"),
        "treat_failure_as_deployment_failure": S("treatFailureAsDeploymentFailure"),
    }
    configuration_reference: Optional[str] = field(default=None, metadata={'description': 'Optional, specifies the uri to an azure blob that will replace the default configuration for the package if provided.'})  # fmt: skip
    enable_automatic_upgrade: Optional[bool] = field(default=None, metadata={'description': 'If set to true, when a new gallery application version is available in pir/sig, it will be automatically updated for the vm/vmss.'})  # fmt: skip
    order: Optional[int] = field(default=None, metadata={'description': 'Optional, specifies the order in which the packages have to be installed.'})  # fmt: skip
    package_reference_id: Optional[str] = field(default=None, metadata={'description': 'Specifies the galleryapplicationversion resource id on the form of /subscriptions/{subscriptionid}/resourcegroups/{resourcegroupname}/providers/microsoft. Compute/galleries/{galleryname}/applications/{application}/versions/{version}.'})  # fmt: skip
    # NOTE: in this API, "tags" is a single passthrough string, not a tag dictionary.
    tags: Optional[str] = field(default=None, metadata={'description': 'Optional, specifies a passthrough value for more generic context.'})  # fmt: skip
    treat_failure_as_deployment_failure: Optional[bool] = field(default=None, metadata={'description': 'Optional, if true, any failure for any operation in the vmapplication will fail the deployment.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureApplicationProfile:
    """List of gallery applications made available to a VM/VMSS."""

    kind: ClassVar[str] = "azure_application_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "gallery_applications": S("galleryApplications") >> ForallBend(AzureVMGalleryApplication.mapping),
    }
    gallery_applications: Optional[List[AzureVMGalleryApplication]] = field(
        default=None,
        metadata={
            "description": "Specifies the gallery applications that should be made available to the vm/vmss."
        },
    )
@define(eq=False, slots=False)
class AzureResourceWithOptionalLocation:
    """Common shape of an Azure resource whose location may be absent: id, name, type, tags, location.

    Serves as a base for sub-resources such as VM extensions.
    """

    kind: ClassVar[str] = "azure_resource_with_optional_location"
    # All five properties map one-to-one from the identically named JSON keys.
    mapping: ClassVar[Dict[str, Bender]] = {
        prop: S(prop) for prop in ("id", "location", "name", "tags", "type")
    }
    id: Optional[str] = field(
        default=None, metadata={"description": "Resource id."}
    )
    location: Optional[str] = field(
        default=None, metadata={"description": "Resource location."}
    )
    name: Optional[str] = field(
        default=None, metadata={"description": "Resource name."}
    )
    tags: Optional[Dict[str, str]] = field(
        default=None, metadata={"description": "Resource tags."}
    )
    type: Optional[str] = field(
        default=None, metadata={"description": "Resource type."}
    )
@define(eq=False, slots=False)
class AzureVirtualMachineExtension(AzureResourceWithOptionalLocation):
    """A VM extension sub-resource: publisher/type/version of the handler, its settings and runtime state.

    Inherits id/name/type/tags/location handling from AzureResourceWithOptionalLocation; all
    extension-specific values live under the REST object's `properties` key.
    """

    kind: ClassVar[str] = "azure_virtual_machine_extension"
    mapping: ClassVar[Dict[str, Bender]] = {
        "auto_upgrade_minor_version": S("properties", "autoUpgradeMinorVersion"),
        "enable_automatic_upgrade": S("properties", "enableAutomaticUpgrade"),
        "force_update_tag": S("properties", "forceUpdateTag"),
        "machine_extension_instance_view": S("properties", "instanceView")
        >> Bend(AzureVirtualMachineExtensionInstanceView.mapping),
        "protected_settings": S("properties", "protectedSettings"),
        "protected_settings_from_key_vault": S("properties", "protectedSettingsFromKeyVault")
        >> Bend(AzureKeyVaultSecretReference.mapping),
        "provision_after_extensions": S("properties", "provisionAfterExtensions"),
        "provisioning_state": S("properties", "provisioningState"),
        "publisher": S("properties", "publisher"),
        "settings": S("properties", "settings"),
        "suppress_failures": S("properties", "suppressFailures"),
        "type": S("properties", "type"),
        "type_handler_version": S("properties", "typeHandlerVersion"),
    }
    auto_upgrade_minor_version: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.'})  # fmt: skip
    enable_automatic_upgrade: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.'})  # fmt: skip
    force_update_tag: Optional[str] = field(default=None, metadata={'description': 'How the extension handler should be forced to update even if the extension configuration has not changed.'})  # fmt: skip
    machine_extension_instance_view: Optional[AzureVirtualMachineExtensionInstanceView] = field(default=None, metadata={'description': 'The instance view of a virtual machine extension.'})  # fmt: skip
    protected_settings: Optional[Any] = field(default=None, metadata={'description': 'The extension can contain either protectedsettings or protectedsettingsfromkeyvault or no protected settings at all.'})  # fmt: skip
    protected_settings_from_key_vault: Optional[AzureKeyVaultSecretReference] = field(default=None, metadata={'description': 'Describes a reference to key vault secret.'})  # fmt: skip
    provision_after_extensions: Optional[List[str]] = field(default=None, metadata={'description': 'Collection of extension names after which this extension needs to be provisioned.'})  # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The provisioning state, which only appears in the response.'})  # fmt: skip
    publisher: Optional[str] = field(default=None, metadata={'description': 'The name of the extension handler publisher.'})  # fmt: skip
    settings: Optional[Any] = field(default=None, metadata={'description': 'Json formatted public settings for the extension.'})  # fmt: skip
    suppress_failures: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether failures stemming from the extension will be suppressed (operational failures such as not connecting to the vm will not be suppressed regardless of this value). The default is false.'})  # fmt: skip
    # NOTE: "type" here overrides the base-class resource type with properties.type (the extension type).
    type: Optional[str] = field(default=None, metadata={'description': 'Specifies the type of the extension; an example is customscriptextension.'})  # fmt: skip
    type_handler_version: Optional[str] = field(default=None, metadata={'description': 'Specifies the version of the script handler.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineIdentity:
    """Managed identity configuration of a VM: system-assigned principal/tenant ids and user-assigned identities."""

    kind: ClassVar[str] = "azure_virtual_machine_identity"
    mapping: ClassVar[Dict[str, Bender]] = {
        "principal_id": S("principalId"),
        "tenant_id": S("tenantId"),
        "type": S("type"),
        "user_assigned_identities": S("userAssignedIdentities"),
    }
    principal_id: Optional[str] = field(default=None, metadata={'description': 'The principal id of virtual machine identity. This property will only be provided for a system assigned identity.'})  # fmt: skip
    tenant_id: Optional[str] = field(default=None, metadata={'description': 'The tenant id associated with the virtual machine. This property will only be provided for a system assigned identity.'})  # fmt: skip
    type: Optional[str] = field(default=None, metadata={'description': 'The type of identity used for the virtual machine. The type systemassigned, userassigned includes both an implicitly created identity and a set of user assigned identities. The type none will remove any identities from the virtual machine.'})  # fmt: skip
    user_assigned_identities: Optional[Dict[str, AzurePrincipalidClientid]] = field(default=None, metadata={'description': 'The list of user identities associated with the virtual machine. The user identity dictionary key references will be arm resource ids in the form: /subscriptions/{subscriptionid}/resourcegroups/{resourcegroupname}/providers/microsoft. Managedidentity/userassignedidentities/{identityname}.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachine(AzureResource):
    """Collectable Azure Virtual Machine resource.

    `api_spec` lists all VMs of a subscription via the Microsoft.Compute REST API
    (api-version 2023-03-01); `mapping` bends the REST JSON into this model.
    Several related resources (availability set, host, host group, proximity placement
    group, scale set) are stored as plain id strings rather than nested objects.
    """

    kind: ClassVar[str] = "azure_virtual_machine"
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        # creation time comes from the API; modification/access times are not provided.
        "ctime": S("properties", "timeCreated"),
        "mtime": K(None),
        "atime": K(None),
        "virtual_machine_capabilities": S("properties", "additionalCapabilities")
        >> Bend(AzureAdditionalCapabilities.mapping),
        "application_profile": S("properties", "applicationProfile") >> Bend(AzureApplicationProfile.mapping),
        "availability_set": S("properties", "availabilitySet", "id"),
        "billing_profile": S("properties", "billingProfile", "maxPrice"),
        "capacity_reservation": S("properties", "capacityReservation") >> Bend(AzureCapacityReservationProfile.mapping),
        "virtual_machine_diagnostics_profile": S("properties", "diagnosticsProfile")
        >> Bend(AzureDiagnosticsProfile.mapping),
        "eviction_policy": S("properties", "evictionPolicy"),
        "extended_location": S("extendedLocation") >> Bend(AzureExtendedLocation.mapping),
        "extensions_time_budget": S("properties", "extensionsTimeBudget"),
        "hardware_profile": S("properties", "hardwareProfile") >> Bend(AzureHardwareProfile.mapping),
        "host": S("properties", "host", "id"),
        "host_group": S("properties", "hostGroup", "id"),
        "virtual_machine_identity": S("identity") >> Bend(AzureVirtualMachineIdentity.mapping),
        "virtual_machine_instance_view": S("properties", "instanceView")
        >> Bend(AzureVirtualMachineInstanceView.mapping),
        "license_type": S("properties", "licenseType"),
        "virtual_machine_network_profile": S("properties", "networkProfile") >> Bend(AzureNetworkProfile.mapping),
        "virtual_machine_os_profile": S("properties", "osProfile") >> Bend(AzureOSProfile.mapping),
        "azure_plan": S("plan") >> Bend(AzurePlan.mapping),
        "platform_fault_domain": S("properties", "platformFaultDomain"),
        "virtual_machine_priority": S("properties", "priority"),
        "provisioning_state": S("properties", "provisioningState"),
        "proximity_placement_group": S("properties", "proximityPlacementGroup", "id"),
        "virtual_machine_resources": S("resources") >> ForallBend(AzureVirtualMachineExtension.mapping),
        "scheduled_events_profile": S("properties", "scheduledEventsProfile")
        >> Bend(AzureScheduledEventsProfile.mapping),
        "virtual_machine_security_profile": S("properties", "securityProfile") >> Bend(AzureSecurityProfile.mapping),
        "virtual_machine_storage_profile": S("properties", "storageProfile") >> Bend(AzureStorageProfile.mapping),
        "time_created": S("properties", "timeCreated"),
        "user_data": S("properties", "userData"),
        "virtual_machine_scale_set": S("properties", "virtualMachineScaleSet", "id"),
        "vm_id": S("properties", "vmId"),
    }
    virtual_machine_capabilities: Optional[AzureAdditionalCapabilities] = field(default=None, metadata={'description': 'Enables or disables a capability on the virtual machine or virtual machine scale set.'})  # fmt: skip
    application_profile: Optional[AzureApplicationProfile] = field(default=None, metadata={'description': 'Contains the list of gallery applications that should be made available to the vm/vmss.'})  # fmt: skip
    availability_set: Optional[str] = field(default=None, metadata={"description": ""})
    # holds billingProfile.maxPrice (spot VM max price), hence the float type.
    billing_profile: Optional[float] = field(default=None, metadata={'description': 'Specifies the billing related details of a azure spot vm or vmss. Minimum api-version: 2019-03-01.'})  # fmt: skip
    capacity_reservation: Optional[AzureCapacityReservationProfile] = field(default=None, metadata={'description': 'The parameters of a capacity reservation profile.'})  # fmt: skip
    virtual_machine_diagnostics_profile: Optional[AzureDiagnosticsProfile] = field(default=None, metadata={'description': 'Specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15.'})  # fmt: skip
    eviction_policy: Optional[str] = field(default=None, metadata={'description': 'Specifies the eviction policy for the azure spot vm/vmss.'})  # fmt: skip
    extended_location: Optional[AzureExtendedLocation] = field(default=None, metadata={'description': 'The complex type of the extended location.'})  # fmt: skip
    extensions_time_budget: Optional[str] = field(default=None, metadata={'description': 'Specifies the time alloted for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in iso 8601 format. The default value is 90 minutes (pt1h30m). Minimum api-version: 2020-06-01.'})  # fmt: skip
    hardware_profile: Optional[AzureHardwareProfile] = field(default=None, metadata={'description': 'Specifies the hardware settings for the virtual machine.'})  # fmt: skip
    host: Optional[str] = field(default=None, metadata={"description": ""})
    host_group: Optional[str] = field(default=None, metadata={"description": ""})
    virtual_machine_identity: Optional[AzureVirtualMachineIdentity] = field(default=None, metadata={'description': 'Identity for the virtual machine.'})  # fmt: skip
    virtual_machine_instance_view: Optional[AzureVirtualMachineInstanceView] = field(default=None, metadata={'description': 'The instance view of a virtual machine.'})  # fmt: skip
    license_type: Optional[str] = field(default=None, metadata={'description': 'Specifies that the image or disk that is being used was licensed on-premises. Possible values for windows server operating system are: windows_client windows_server possible values for linux server operating system are: rhel_byos (for rhel) sles_byos (for suse) for more information, see [azure hybrid use benefit for windows server](https://docs. Microsoft. Com/azure/virtual-machines/windows/hybrid-use-benefit-licensing) [azure hybrid use benefit for linux server](https://docs. Microsoft. Com/azure/virtual-machines/linux/azure-hybrid-benefit-linux) minimum api-version: 2015-06-15.'})  # fmt: skip
    virtual_machine_network_profile: Optional[AzureNetworkProfile] = field(default=None, metadata={'description': 'Specifies the network interfaces or the networking configuration of the virtual machine.'})  # fmt: skip
    virtual_machine_os_profile: Optional[AzureOSProfile] = field(default=None, metadata={'description': 'Specifies the operating system settings for the virtual machine. Some of the settings cannot be changed once vm is provisioned.'})  # fmt: skip
    azure_plan: Optional[AzurePlan] = field(default=None, metadata={'description': 'Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an api, you must enable the image for programmatic use. In the azure portal, find the marketplace image that you want to use and then click **want to deploy programmatically, get started ->**. Enter any required information and then click **save**.'})  # fmt: skip
    platform_fault_domain: Optional[int] = field(default=None, metadata={'description': 'Specifies the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will by automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualmachinescaleset property of this virtual machine is set. The virtual machine scale set that is referenced, must have platformfaultdomaincount greater than 1. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view. Minimum api‐version: 2020‐12‐01.'})  # fmt: skip
    virtual_machine_priority: Optional[str] = field(default=None, metadata={'description': 'Specifies the priority for a standalone virtual machine or the virtual machines in the scale set. Low enum will be deprecated in the future, please use spot as the enum to deploy azure spot vm/vmss.'})  # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The provisioning state, which only appears in the response.'})  # fmt: skip
    proximity_placement_group: Optional[str] = field(default=None, metadata={"description": ""})
    virtual_machine_resources: Optional[List[AzureVirtualMachineExtension]] = field(default=None, metadata={'description': 'The virtual machine child extension resources.'})  # fmt: skip
    scheduled_events_profile: Optional[AzureScheduledEventsProfile] = field(default=None, metadata={"description": ""})
    virtual_machine_security_profile: Optional[AzureSecurityProfile] = field(default=None, metadata={'description': 'Specifies the security profile settings for the virtual machine or virtual machine scale set.'})  # fmt: skip
    virtual_machine_storage_profile: Optional[AzureStorageProfile] = field(default=None, metadata={'description': 'Specifies the storage settings for the virtual machine disks.'})  # fmt: skip
    time_created: Optional[datetime] = field(default=None, metadata={'description': 'Specifies the time at which the virtual machine resource was created. Minimum api-version: 2021-11-01.'})  # fmt: skip
    user_data: Optional[str] = field(default=None, metadata={'description': 'Userdata for the vm, which must be base-64 encoded. Customer should not pass any secrets in here. Minimum api-version: 2021-03-01.'})  # fmt: skip
    virtual_machine_scale_set: Optional[str] = field(default=None, metadata={"description": ""})
    vm_id: Optional[str] = field(default=None, metadata={'description': 'Specifies the vm unique id which is a 128-bits identifier that is encoded and stored in all azure iaas vms smbios and can be read using platform bios commands.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureRollingUpgradePolicy:
    """Rolling-upgrade policy of a VM scale set: batch sizes, unhealthy thresholds, pacing and rollback options."""

    kind: ClassVar[str] = "azure_rolling_upgrade_policy"
    mapping: ClassVar[Dict[str, Bender]] = {
        "enable_cross_zone_upgrade": S("enableCrossZoneUpgrade"),
        "max_batch_instance_percent": S("maxBatchInstancePercent"),
        "max_surge": S("maxSurge"),
        "max_unhealthy_instance_percent": S("maxUnhealthyInstancePercent"),
        "max_unhealthy_upgraded_instance_percent": S("maxUnhealthyUpgradedInstancePercent"),
        "pause_time_between_batches": S("pauseTimeBetweenBatches"),
        "prioritize_unhealthy_instances": S("prioritizeUnhealthyInstances"),
        "rollback_failed_instances_on_policy_breach": S("rollbackFailedInstancesOnPolicyBreach"),
    }
    enable_cross_zone_upgrade: Optional[bool] = field(default=None, metadata={'description': 'Allow vmss to ignore az boundaries when constructing upgrade batches. Take into consideration the update domain and maxbatchinstancepercent to determine the batch size.'})  # fmt: skip
    max_batch_instance_percent: Optional[int] = field(default=None, metadata={'description': 'The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The default value for this parameter is 20%.'})  # fmt: skip
    max_surge: Optional[bool] = field(default=None, metadata={'description': 'Create new virtual machines to upgrade the scale set, rather than updating the existing virtual machines. Existing virtual machines will be deleted once the new virtual machines are created for each batch.'})  # fmt: skip
    max_unhealthy_instance_percent: Optional[int] = field(default=None, metadata={'description': 'The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The default value for this parameter is 20%.'})  # fmt: skip
    max_unhealthy_upgraded_instance_percent: Optional[int] = field(default=None, metadata={'description': 'The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The default value for this parameter is 20%.'})  # fmt: skip
    pause_time_between_batches: Optional[str] = field(default=None, metadata={'description': 'The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in iso 8601 format. The default value is 0 seconds (pt0s).'})  # fmt: skip
    prioritize_unhealthy_instances: Optional[bool] = field(default=None, metadata={'description': 'Upgrade all unhealthy instances in a scale set before any healthy instances.'})  # fmt: skip
    rollback_failed_instances_on_policy_breach: Optional[bool] = field(default=None, metadata={'description': 'Rollback failed instances to previous model if the rolling upgrade policy is violated.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureAutomaticOSUpgradePolicy:
    """Model of the Azure scale-set `automaticOSUpgradePolicy` JSON object.

    Declarative schema class: `mapping` defines how each attrs field is
    extracted (bent) from the raw Azure API response; field `metadata`
    carries the upstream description text.
    """

    kind: ClassVar[str] = "azure_automatic_os_upgrade_policy"
    # JSON-path extraction rules, keyed by the attrs field name.
    mapping: ClassVar[Dict[str, Bender]] = {
        "disable_automatic_rollback": S("disableAutomaticRollback"),
        "enable_automatic_os_upgrade": S("enableAutomaticOSUpgrade"),
        "use_rolling_upgrade_policy": S("useRollingUpgradePolicy"),
    }
    disable_automatic_rollback: Optional[bool] = field(default=None, metadata={'description': 'Whether os image rollback feature should be disabled. Default value is false.'})  # fmt: skip
    enable_automatic_os_upgrade: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether os upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the os image becomes available. Default value is false. If this is set to true for windows based scale sets, [enableautomaticupdates](https://docs. Microsoft. Com/dotnet/api/microsoft. Azure. Management. Compute. Models. Windowsconfiguration. Enableautomaticupdates?view=azure-dotnet) is automatically set to false and cannot be set to true.'})  # fmt: skip
    use_rolling_upgrade_policy: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether rolling upgrade policy should be used during auto os upgrade. Default value is false. Auto os upgrade will fallback to the default policy if no policy is defined on the vmss.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureUpgradePolicy:
    """Model of the Azure scale-set upgrade policy (mode plus the optional
    automatic-OS-upgrade and rolling-upgrade sub-policies).

    `mapping` describes how each field is bent out of the raw API JSON;
    nested objects are decoded via the referenced classes' mappings.
    """

    kind: ClassVar[str] = "azure_upgrade_policy"
    # Nested policy objects are parsed with Bend(<sub-class>.mapping).
    mapping: ClassVar[Dict[str, Bender]] = {
        "automatic_os_upgrade_policy": S("automaticOSUpgradePolicy") >> Bend(AzureAutomaticOSUpgradePolicy.mapping),
        "mode": S("mode"),
        "rolling_upgrade_policy": S("rollingUpgradePolicy") >> Bend(AzureRollingUpgradePolicy.mapping),
    }
    automatic_os_upgrade_policy: Optional[AzureAutomaticOSUpgradePolicy] = field(default=None, metadata={'description': 'The configuration parameters used for performing automatic os upgrade.'})  # fmt: skip
    mode: Optional[str] = field(default=None, metadata={'description': 'Specifies the mode of an upgrade to virtual machines in the scale set. Possible values are: **manual** - you control the application of updates to virtual machines in the scale set. You do this by using the manualupgrade action. **automatic** - all virtual machines in the scale set are automatically updated at the same time.'})  # fmt: skip
    rolling_upgrade_policy: Optional[AzureRollingUpgradePolicy] = field(default=None, metadata={'description': 'The configuration parameters used while performing a rolling upgrade.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureAutomaticRepairsPolicy:
    """Model of the Azure scale-set `automaticRepairsPolicy` JSON object
    (enabled flag, grace period, and repair action).
    """

    kind: ClassVar[str] = "azure_automatic_repairs_policy"
    # Flat object: every field is a simple top-level JSON key.
    mapping: ClassVar[Dict[str, Bender]] = {
        "enabled": S("enabled"),
        "grace_period": S("gracePeriod"),
        "repair_action": S("repairAction"),
    }
    enabled: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether automatic repairs should be enabled on the virtual machine scale set. The default value is false.'})  # fmt: skip
    grace_period: Optional[str] = field(default=None, metadata={'description': 'The amount of time for which automatic repairs are suspended due to a state change on vm. The grace time starts after the state change has completed. This helps avoid premature or accidental repairs. The time duration should be specified in iso 8601 format. The minimum allowed grace period is 10 minutes (pt10m), which is also the default value. The maximum allowed grace period is 90 minutes (pt90m).'})  # fmt: skip
    repair_action: Optional[str] = field(default=None, metadata={'description': 'Type of repair action (replace, restart, reimage) that will be used for repairing unhealthy virtual machines in the scale set. Default value is replace.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetOSProfile:
    """Model of the Azure scale-set OS profile: admin credentials, computer
    name prefix, custom data, and OS-specific (Linux/Windows) configuration.

    `mapping` bends each field from the raw API JSON; nested objects and
    lists are decoded via Bend/ForallBend with the sub-class mappings.
    """

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_os_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "admin_password": S("adminPassword"),
        "admin_username": S("adminUsername"),
        "allow_extension_operations": S("allowExtensionOperations"),
        "computer_name_prefix": S("computerNamePrefix"),
        "custom_data": S("customData"),
        "linux_configuration": S("linuxConfiguration") >> Bend(AzureLinuxConfiguration.mapping),
        "require_guest_provision_signal": S("requireGuestProvisionSignal"),
        "secrets": S("secrets") >> ForallBend(AzureVaultSecretGroup.mapping),
        "windows_configuration": S("windowsConfiguration") >> Bend(AzureWindowsConfiguration.mapping),
    }
    admin_password: Optional[str] = field(default=None, metadata={'description': 'Specifies the password of the administrator account. **minimum-length (windows):** 8 characters **minimum-length (linux):** 6 characters **max-length (windows):** 123 characters **max-length (linux):** 72 characters **complexity requirements:** 3 out of 4 conditions below need to be fulfilled has lower characters has upper characters has a digit has a special character (regex match [\\w_]) **disallowed values:** abc@123 , p@$$w0rd , p@ssw0rd , p@ssword123 , pa$$word , pass@word1 , password! , password1 , password22 , iloveyou! for resetting the password, see [how to reset the remote desktop service or its login password in a windows vm](https://docs. Microsoft. Com/troubleshoot/azure/virtual-machines/reset-rdp) for resetting root password, see [manage users, ssh, and check or repair disks on azure linux vms using the vmaccess extension](https://docs. Microsoft. Com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection).'})  # fmt: skip
    admin_username: Optional[str] = field(default=None, metadata={'description': 'Specifies the name of the administrator account. **windows-only restriction:** cannot end in. **disallowed values:** administrator , admin , user , user1 , test , user2 , test1 , user3 , admin1 , 1 , 123 , a , actuser , adm , admin2 , aspnet , backup , console , david , guest , john , owner , root , server , sql , support , support_388945a0 , sys , test2 , test3 , user4 , user5. **minimum-length (linux):** 1 character **max-length (linux):** 64 characters **max-length (windows):** 20 characters.'})  # fmt: skip
    allow_extension_operations: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether extension operations should be allowed on the virtual machine scale set. This may only be set to false when no extensions are present on the virtual machine scale set.'})  # fmt: skip
    computer_name_prefix: Optional[str] = field(default=None, metadata={'description': 'Specifies the computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long.'})  # fmt: skip
    custom_data: Optional[str] = field(default=None, metadata={'description': 'Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the virtual machine. The maximum length of the binary array is 65535 bytes. For using cloud-init for your vm, see [using cloud-init to customize a linux vm during creation](https://docs. Microsoft. Com/azure/virtual-machines/linux/using-cloud-init).'})  # fmt: skip
    linux_configuration: Optional[AzureLinuxConfiguration] = field(default=None, metadata={'description': 'Specifies the linux operating system settings on the virtual machine. For a list of supported linux distributions, see [linux on azure-endorsed distributions](https://docs. Microsoft. Com/azure/virtual-machines/linux/endorsed-distros).'})  # fmt: skip
    require_guest_provision_signal: Optional[bool] = field(default=None, metadata={'description': 'Optional property which must either be set to true or omitted.'})  # fmt: skip
    secrets: Optional[List[AzureVaultSecretGroup]] = field(default=None, metadata={'description': 'Specifies set of certificates that should be installed onto the virtual machines in the scale set. To install certificates on a virtual machine it is recommended to use the [azure key vault virtual machine extension for linux](https://docs. Microsoft. Com/azure/virtual-machines/extensions/key-vault-linux) or the [azure key vault virtual machine extension for windows](https://docs. Microsoft. Com/azure/virtual-machines/extensions/key-vault-windows).'})  # fmt: skip
    windows_configuration: Optional[AzureWindowsConfiguration] = field(default=None, metadata={'description': 'Specifies windows operating system settings on the virtual machine.'})  # fmt: skip
<head>
code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import logging
import re
from typing import ClassVar, Dict, Optional, Type, Any, List, Pattern, Union
from attr import field, evolve
from attrs import define
from resoto_plugin_aws.resource.base import AwsResource, GraphBuilder, AwsApiSpec, AwsRegion
from resotolib.baseresources import BaseQuota, EdgeType, ModelReference
from resotolib.json_bender import Bender, S, Bend
from resotolib.types import Json
from resoto_plugin_aws.aws_client import AwsClient
log = logging.getLogger("resoto.plugins.aws")
service_name = "service-quotas"
@define(eq=False, slots=False)
class AwsQuotaMetricInfo:
kind: ClassVar[str] = "aws_quota_metric_info"
mapping: ClassVar[Dict[str, Bender]] = {
"metric_namespace": S("MetricNamespace"),
"metric_name": S("MetricName"),
"metric_dimensions": S("MetricDimensions"),
"metric_statistic_recommendation": S("MetricStatisticRecommendation"),
}
metric_namespace: Optional[str] = field(default=None)
metric_name: Optional[str] = field(default=None)
metric_dimensions: Optional[Dict[str, str]] = field(default=None)
metric_statistic_recommendation: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsQuotaPeriod:
kind: ClassVar[str] = "aws_quota_period"
mapping: ClassVar[Dict[str, Bender]] = {"period_value": S("PeriodValue"), "period_unit": S("PeriodUnit")}
period_value: Optional[int] = field(default=None)
period_unit: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsQuotaErrorReason:
kind: ClassVar[str] = "aws_quota_error_reason"
mapping: ClassVar[Dict[str, Bender]] = {"error_code": S("ErrorCode"), "error_message": S("ErrorMessage")}
error_code: Optional[str] = field(default=None)
error_message: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsServiceQuota(AwsResource, BaseQuota):
</head>

@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetManagedDiskParameters:
    """Model of the scale-set managed-disk parameters: encryption set,
    security profile, and storage account type.
    """

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_managed_disk_parameters"
    mapping: ClassVar[Dict[str, Bender]] = {
        "disk_encryption_set": S("diskEncryptionSet") >> Bend(AzureDiskEncryptionSetParameters.mapping),
        "security_profile": S("securityProfile") >> Bend(AzureVMDiskSecurityProfile.mapping),
        "storage_account_type": S("storageAccountType"),
    }
    disk_encryption_set: Optional[AzureDiskEncryptionSetParameters] = field(default=None, metadata={'description': 'Describes the parameter of customer managed disk encryption set resource id that can be specified for disk. **note:** the disk encryption set resource id can only be specified for managed disk. Please refer https://aka. Ms/mdssewithcmkoverview for more details.'})  # fmt: skip
    security_profile: Optional[AzureVMDiskSecurityProfile] = field(default=None, metadata={'description': 'Specifies the security profile settings for the managed disk. **note:** it can only be set for confidential vms.'})  # fmt: skip
    storage_account_type: Optional[str] = field(default=None, metadata={'description': 'Specifies the storage account type for the managed disk. Managed os disk storage account type can only be set when you create the scale set. Note: ultrassd_lrs can only be used with data disks. It cannot be used with os disk. Standard_lrs uses standard hdd. Standardssd_lrs uses standard ssd. Premium_lrs uses premium ssd. Ultrassd_lrs uses ultra disk. Premium_zrs uses premium ssd zone redundant storage. Standardssd_zrs uses standard ssd zone redundant storage. For more information regarding disks supported for windows virtual machines, refer to https://docs. Microsoft. Com/azure/virtual-machines/windows/disks-types and, for linux virtual machines, refer to https://docs. Microsoft. Com/azure/virtual-machines/linux/disks-types.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetOSDisk:
    """Model of the scale-set operating-system disk (`osDisk` API object):
    caching, create/delete options, size, image URI, and managed-disk params.
    """

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_os_disk"
    mapping: ClassVar[Dict[str, Bender]] = {
        "caching": S("caching"),
        "create_option": S("createOption"),
        "delete_option": S("deleteOption"),
        "diff_disk_settings": S("diffDiskSettings") >> Bend(AzureDiffDiskSettings.mapping),
        "disk_size_gb": S("diskSizeGB"),
        # "image" is flattened: only the nested uri string is kept.
        "image": S("image", "uri"),
        "managed_disk": S("managedDisk") >> Bend(AzureVirtualMachineScaleSetManagedDiskParameters.mapping),
        "name": S("name"),
        "os_type": S("osType"),
        "vhd_containers": S("vhdContainers"),
        "write_accelerator_enabled": S("writeAcceleratorEnabled"),
    }
    caching: Optional[str] = field(default=None, metadata={'description': 'Specifies the caching requirements. Possible values are: **none,** **readonly,** **readwrite. ** the default values are: **none for standard storage. Readonly for premium storage**.'})  # fmt: skip
    create_option: Optional[str] = field(default=None, metadata={'description': 'Specifies how the virtual machine should be created. Possible values are: **attach. ** this value is used when you are using a specialized disk to create the virtual machine. **fromimage. ** this value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imagereference element described above. If you are using a marketplace image, you also use the plan element previously described.'})  # fmt: skip
    delete_option: Optional[str] = field(default=None, metadata={'description': 'Specifies the behavior of the managed disk when the vm gets deleted, for example whether the managed disk is deleted or detached. Supported values are: **delete. ** if this value is used, the managed disk is deleted when vm gets deleted. **detach. ** if this value is used, the managed disk is retained after vm gets deleted. Minimum api-version: 2021-03-01.'})  # fmt: skip
    diff_disk_settings: Optional[AzureDiffDiskSettings] = field(default=None, metadata={'description': 'Describes the parameters of ephemeral disk settings that can be specified for operating system disk. **note:** the ephemeral disk settings can only be specified for managed disk.'})  # fmt: skip
    disk_size_gb: Optional[int] = field(default=None, metadata={'description': 'Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. The property disksizegb is the number of bytes x 1024^3 for the disk and the value cannot be larger than 1023.'})  # fmt: skip
    image: Optional[str] = field(default=None, metadata={"description": "Describes the uri of a disk."})
    managed_disk: Optional[AzureVirtualMachineScaleSetManagedDiskParameters] = field(default=None, metadata={'description': 'Describes the parameters of a scaleset managed disk.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The disk name."})
    os_type: Optional[str] = field(default=None, metadata={'description': 'This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd. Possible values are: **windows,** **linux. **.'})  # fmt: skip
    vhd_containers: Optional[List[str]] = field(default=None, metadata={'description': 'Specifies the container urls that are used to store operating system disks for the scale set.'})  # fmt: skip
    write_accelerator_enabled: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether writeaccelerator should be enabled or disabled on the disk.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetDataDisk:
    """Model of a single scale-set data disk (`dataDisks[]` API object):
    caching/create/delete options, IOPS and bandwidth limits, size, LUN,
    and managed-disk parameters.
    """

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_data_disk"
    mapping: ClassVar[Dict[str, Bender]] = {
        "caching": S("caching"),
        "create_option": S("createOption"),
        "delete_option": S("deleteOption"),
        "disk_iops_read_write": S("diskIOPSReadWrite"),
        "disk_m_bps_read_write": S("diskMBpsReadWrite"),
        "disk_size_gb": S("diskSizeGB"),
        "lun": S("lun"),
        "managed_disk": S("managedDisk") >> Bend(AzureVirtualMachineScaleSetManagedDiskParameters.mapping),
        "name": S("name"),
        "write_accelerator_enabled": S("writeAcceleratorEnabled"),
    }
    caching: Optional[str] = field(default=None, metadata={'description': 'Specifies the caching requirements. Possible values are: **none,** **readonly,** **readwrite. ** the default values are: **none for standard storage. Readonly for premium storage**.'})  # fmt: skip
    create_option: Optional[str] = field(default=None, metadata={'description': 'Specifies how the virtual machine should be created. Possible values are: **attach. ** this value is used when you are using a specialized disk to create the virtual machine. **fromimage. ** this value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imagereference element described above. If you are using a marketplace image, you also use the plan element previously described.'})  # fmt: skip
    delete_option: Optional[str] = field(default=None, metadata={'description': 'Specifies the behavior of the managed disk when the vm gets deleted, for example whether the managed disk is deleted or detached. Supported values are: **delete. ** if this value is used, the managed disk is deleted when vm gets deleted. **detach. ** if this value is used, the managed disk is retained after vm gets deleted. Minimum api-version: 2021-03-01.'})  # fmt: skip
    disk_iops_read_write: Optional[int] = field(default=None, metadata={'description': 'Specifies the read-write iops for the managed disk. Should be used only when storageaccounttype is ultrassd_lrs. If not specified, a default value would be assigned based on disksizegb.'})  # fmt: skip
    disk_m_bps_read_write: Optional[int] = field(default=None, metadata={'description': 'Specifies the bandwidth in mb per second for the managed disk. Should be used only when storageaccounttype is ultrassd_lrs. If not specified, a default value would be assigned based on disksizegb.'})  # fmt: skip
    disk_size_gb: Optional[int] = field(default=None, metadata={'description': 'Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. The property disksizegb is the number of bytes x 1024^3 for the disk and the value cannot be larger than 1023.'})  # fmt: skip
    lun: Optional[int] = field(default=None, metadata={'description': 'Specifies the logical unit number of the data disk. This value is used to identify data disks within the vm and therefore must be unique for each data disk attached to a vm.'})  # fmt: skip
    managed_disk: Optional[AzureVirtualMachineScaleSetManagedDiskParameters] = field(default=None, metadata={'description': 'Describes the parameters of a scaleset managed disk.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The disk name."})
    write_accelerator_enabled: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether writeaccelerator should be enabled or disabled on the disk.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetStorageProfile:
    """Model of the scale-set storage profile: image reference, OS disk,
    data disks, and disk controller type.
    """

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_storage_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        "data_disks": S("dataDisks") >> ForallBend(AzureVirtualMachineScaleSetDataDisk.mapping),
        "disk_controller_type": S("diskControllerType"),
        "image_reference": S("imageReference") >> Bend(AzureImageReference.mapping),
        "os_disk": S("osDisk") >> Bend(AzureVirtualMachineScaleSetOSDisk.mapping),
    }
    data_disks: Optional[List[AzureVirtualMachineScaleSetDataDisk]] = field(default=None, metadata={'description': 'Specifies the parameters that are used to add data disks to the virtual machines in the scale set. For more information about disks, see [about disks and vhds for azure virtual machines](https://docs. Microsoft. Com/azure/virtual-machines/managed-disks-overview).'})  # fmt: skip
    disk_controller_type: Optional[str] = field(default=None, metadata={"description": ""})
    image_reference: Optional[AzureImageReference] = field(default=None, metadata={'description': 'Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. Note: image reference publisher and offer can only be set when you create the scale set.'})  # fmt: skip
    os_disk: Optional[AzureVirtualMachineScaleSetOSDisk] = field(default=None, metadata={'description': 'Describes a virtual machine scale set operating system disk.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetNetworkConfigurationDnsSettings:
    """Model of the NIC-level DNS settings object: just the DNS server list."""

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_network_configuration_dns_settings"
    mapping: ClassVar[Dict[str, Bender]] = {"dns_servers": S("dnsServers")}
    dns_servers: Optional[List[str]] = field(
        default=None, metadata={"description": "List of dns servers ip addresses."}
    )
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetIpTag:
    """Model of a single IP tag (type + value) attached to a public IP."""

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_ip_tag"
    mapping: ClassVar[Dict[str, Bender]] = {"ip_tag_type": S("ipTagType"), "tag": S("tag")}
    ip_tag_type: Optional[str] = field(default=None, metadata={'description': 'Ip tag type. Example: firstpartyusage.'})  # fmt: skip
    tag: Optional[str] = field(default=None, metadata={'description': 'Ip tag associated with the public ip. Example: sql, storage etc.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetPublicIPAddressConfiguration:
    """Model of the scale-set public-IP address configuration.

    Most values live under the API's nested "properties" object, so the
    mapping paths start with S("properties", ...); several nested objects
    are flattened to a single scalar (dns domainNameLabel, prefix id).
    """

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_public_ip_address_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "delete_option": S("properties", "deleteOption"),
        # Flattened: keeps only the domainNameLabel string of dnsSettings.
        "dns_settings": S("properties", "dnsSettings", "domainNameLabel"),
        "idle_timeout_in_minutes": S("properties", "idleTimeoutInMinutes"),
        "ip_tags": S("properties", "ipTags") >> ForallBend(AzureVirtualMachineScaleSetIpTag.mapping),
        "name": S("name"),
        "public_ip_address_version": S("properties", "publicIPAddressVersion"),
        # Flattened: keeps only the resource id of the public IP prefix.
        "public_ip_prefix": S("properties", "publicIPPrefix", "id"),
        "sku": S("sku") >> Bend(AzurePublicIPAddressSku.mapping),
    }
    delete_option: Optional[str] = field(default=None, metadata={'description': 'Specify what happens to the public ip when the vm is deleted.'})  # fmt: skip
    dns_settings: Optional[str] = field(default=None, metadata={'description': 'Describes a virtual machines scale sets network configuration s dns settings.'})  # fmt: skip
    idle_timeout_in_minutes: Optional[int] = field(default=None, metadata={'description': 'The idle timeout of the public ip address.'})  # fmt: skip
    ip_tags: Optional[List[AzureVirtualMachineScaleSetIpTag]] = field(default=None, metadata={'description': 'The list of ip tags associated with the public ip address.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The publicip address configuration name."})
    public_ip_address_version: Optional[str] = field(default=None, metadata={'description': 'Available from api-version 2019-07-01 onwards, it represents whether the specific ipconfiguration is ipv4 or ipv6. Default is taken as ipv4. Possible values are: ipv4 and ipv6.'})  # fmt: skip
    public_ip_prefix: Optional[str] = field(default=None, metadata={"description": ""})
    sku: Optional[AzurePublicIPAddressSku] = field(default=None, metadata={'description': 'Describes the public ip sku. It can only be set with orchestrationmode as flexible.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetIPConfiguration:
    """Model of a scale-set NIC IP configuration.

    Backend-pool and security-group references are flattened to lists of
    resource-id strings (ForallBend(S("id"))); the subnet is likewise kept
    only as its id. The `default=[]` on the list selectors makes a missing
    JSON key decode to an empty list instead of None.
    """

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_ip_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "application_gateway_backend_address_pools": S("properties")
        >> S("applicationGatewayBackendAddressPools", default=[])
        >> ForallBend(S("id")),
        "application_security_groups": S("properties")
        >> S("applicationSecurityGroups", default=[])
        >> ForallBend(S("id")),
        "load_balancer_backend_address_pools": S("properties")
        >> S("loadBalancerBackendAddressPools", default=[])
        >> ForallBend(S("id")),
        "load_balancer_inbound_nat_pools": S("properties")
        >> S("loadBalancerInboundNatPools", default=[])
        >> ForallBend(S("id")),
        "name": S("name"),
        "primary": S("properties", "primary"),
        "private_ip_address_version": S("properties", "privateIPAddressVersion"),
        "public_ip_address_configuration": S("properties", "publicIPAddressConfiguration")
        >> Bend(AzureVirtualMachineScaleSetPublicIPAddressConfiguration.mapping),
        "subnet": S("properties", "subnet", "id"),
    }
    application_gateway_backend_address_pools: Optional[List[str]] = field(default=None, metadata={'description': 'Specifies an array of references to backend address pools of application gateways. A scale set can reference backend address pools of multiple application gateways. Multiple scale sets cannot use the same application gateway.'})  # fmt: skip
    application_security_groups: Optional[List[str]] = field(default=None, metadata={'description': 'Specifies an array of references to application security group.'})  # fmt: skip
    load_balancer_backend_address_pools: Optional[List[str]] = field(default=None, metadata={'description': 'Specifies an array of references to backend address pools of load balancers. A scale set can reference backend address pools of one public and one internal load balancer. Multiple scale sets cannot use the same basic sku load balancer.'})  # fmt: skip
    load_balancer_inbound_nat_pools: Optional[List[str]] = field(default=None, metadata={'description': 'Specifies an array of references to inbound nat pools of the load balancers. A scale set can reference inbound nat pools of one public and one internal load balancer. Multiple scale sets cannot use the same basic sku load balancer.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The ip configuration name."})
    primary: Optional[bool] = field(default=None, metadata={'description': 'Specifies the primary network interface in case the virtual machine has more than 1 network interface.'})  # fmt: skip
    private_ip_address_version: Optional[str] = field(default=None, metadata={'description': 'Available from api-version 2017-03-30 onwards, it represents whether the specific ipconfiguration is ipv4 or ipv6. Default is taken as ipv4. Possible values are: ipv4 and ipv6.'})  # fmt: skip
    public_ip_address_configuration: Optional[AzureVirtualMachineScaleSetPublicIPAddressConfiguration] = field(default=None, metadata={'description': 'Describes a virtual machines scale set ip configuration s publicipaddress configuration.'})  # fmt: skip
    subnet: Optional[str] = field(default=None, metadata={"description": "The api entity reference."})
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetNetworkConfiguration:
    """Model of one scale-set network-interface configuration: NIC flags,
    DNS settings, IP configurations, and the referenced security group
    (flattened to its resource id).
    """

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_network_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "delete_option": S("properties", "deleteOption"),
        "disable_tcp_state_tracking": S("properties", "disableTcpStateTracking"),
        "dns_settings": S("properties", "dnsSettings")
        >> Bend(AzureVirtualMachineScaleSetNetworkConfigurationDnsSettings.mapping),
        "enable_accelerated_networking": S("properties", "enableAcceleratedNetworking"),
        "enable_fpga": S("properties", "enableFpga"),
        "enable_ip_forwarding": S("properties", "enableIPForwarding"),
        "ip_configurations": S("properties", "ipConfigurations")
        >> ForallBend(AzureVirtualMachineScaleSetIPConfiguration.mapping),
        "name": S("name"),
        # Flattened: keeps only the resource id of the security group.
        "network_security_group": S("properties", "networkSecurityGroup", "id"),
        "primary": S("properties", "primary"),
    }
    delete_option: Optional[str] = field(default=None, metadata={'description': 'Specify what happens to the network interface when the vm is deleted.'})  # fmt: skip
    disable_tcp_state_tracking: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether the network interface is disabled for tcp state tracking.'})  # fmt: skip
    dns_settings: Optional[AzureVirtualMachineScaleSetNetworkConfigurationDnsSettings] = field(default=None, metadata={'description': 'Describes a virtual machines scale sets network configuration s dns settings.'})  # fmt: skip
    enable_accelerated_networking: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether the network interface is accelerated networking-enabled.'})  # fmt: skip
    enable_fpga: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether the network interface is fpga networking-enabled.'})  # fmt: skip
    enable_ip_forwarding: Optional[bool] = field(default=None, metadata={'description': 'Whether ip forwarding enabled on this nic.'})  # fmt: skip
    ip_configurations: Optional[List[AzureVirtualMachineScaleSetIPConfiguration]] = field(default=None, metadata={'description': 'Specifies the ip configurations of the network interface.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The network configuration name."})
    network_security_group: Optional[str] = field(default=None, metadata={"description": ""})
    primary: Optional[bool] = field(default=None, metadata={'description': 'Specifies the primary network interface in case the virtual machine has more than 1 network interface.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetNetworkProfile:
    """Model of the scale-set network profile: health probe reference,
    network API version, and the list of NIC configurations.
    """

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_network_profile"
    mapping: ClassVar[Dict[str, Bender]] = {
        # Flattened: keeps only the resource id of the health probe.
        "health_probe": S("healthProbe", "id"),
        "network_api_version": S("networkApiVersion"),
        "network_interface_configurations": S("networkInterfaceConfigurations")
        >> ForallBend(AzureVirtualMachineScaleSetNetworkConfiguration.mapping),
    }
    health_probe: Optional[str] = field(default=None, metadata={"description": "The api entity reference."})
    network_api_version: Optional[str] = field(default=None, metadata={'description': 'Specifies the microsoft. Network api version used when creating networking resources in the network interface configurations for virtual machine scale set with orchestration mode flexible.'})  # fmt: skip
    network_interface_configurations: Optional[List[AzureVirtualMachineScaleSetNetworkConfiguration]] = field(default=None, metadata={'description': 'The list of network configurations.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetExtension(AzureSubResourceReadOnly):
    """Model of a scale-set VM extension resource.

    Inherits the read-only sub-resource base (declared elsewhere in this
    file). `type` mirrors the API property name and intentionally shadows
    the builtin; it stays as-is to keep the external interface stable.
    """

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_extension"
    mapping: ClassVar[Dict[str, Bender]] = {
        "auto_upgrade_minor_version": S("properties", "autoUpgradeMinorVersion"),
        "enable_automatic_upgrade": S("properties", "enableAutomaticUpgrade"),
        "force_update_tag": S("properties", "forceUpdateTag"),
        "name": S("name"),
        "protected_settings": S("properties", "protectedSettings"),
        "protected_settings_from_key_vault": S("properties", "protectedSettingsFromKeyVault")
        >> Bend(AzureKeyVaultSecretReference.mapping),
        "provision_after_extensions": S("properties", "provisionAfterExtensions"),
        "provisioning_state": S("properties", "provisioningState"),
        "publisher": S("properties", "publisher"),
        "settings": S("properties", "settings"),
        "suppress_failures": S("properties", "suppressFailures"),
        "type": S("properties", "type"),
        "type_handler_version": S("properties", "typeHandlerVersion"),
    }
    auto_upgrade_minor_version: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.'})  # fmt: skip
    enable_automatic_upgrade: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.'})  # fmt: skip
    force_update_tag: Optional[str] = field(default=None, metadata={'description': 'If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.'})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The name of the extension."})
    protected_settings: Optional[Any] = field(default=None, metadata={'description': 'The extension can contain either protectedsettings or protectedsettingsfromkeyvault or no protected settings at all.'})  # fmt: skip
    protected_settings_from_key_vault: Optional[AzureKeyVaultSecretReference] = field(default=None, metadata={'description': 'Describes a reference to key vault secret.'})  # fmt: skip
    provision_after_extensions: Optional[List[str]] = field(default=None, metadata={'description': 'Collection of extension names after which this extension needs to be provisioned.'})  # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The provisioning state, which only appears in the response.'})  # fmt: skip
    publisher: Optional[str] = field(default=None, metadata={'description': 'The name of the extension handler publisher.'})  # fmt: skip
    settings: Optional[Any] = field(default=None, metadata={'description': 'Json formatted public settings for the extension.'})  # fmt: skip
    suppress_failures: Optional[bool] = field(default=None, metadata={'description': 'Indicates whether failures stemming from the extension will be suppressed (operational failures such as not connecting to the vm will not be suppressed regardless of this value). The default is false.'})  # fmt: skip
    type: Optional[str] = field(default=None, metadata={'description': 'Specifies the type of the extension; an example is customscriptextension.'})  # fmt: skip
    type_handler_version: Optional[str] = field(default=None, metadata={'description': 'Specifies the version of the script handler.'})  # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetExtensionProfile:
    """Extension profile of a scale set: the child extension resources plus the
    overall time budget allotted for all extensions to start."""

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_extension_profile"
    # Maps the Azure API JSON keys to this model's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "extensions": S("extensions") >> ForallBend(AzureVirtualMachineScaleSetExtension.mapping),
        "extensions_time_budget": S("extensionsTimeBudget"),
    }
    extensions: Optional[List[AzureVirtualMachineScaleSetExtension]] = field(default=None, metadata={'description': 'The virtual machine scale set child extension resources.'}) # fmt: skip
    extensions_time_budget: Optional[str] = field(default=None, metadata={'description': 'Specifies the time alloted for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in iso 8601 format. The default value is 90 minutes (pt1h30m). Minimum api-version: 2020-06-01.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetHardwareProfile:
    """Hardware profile of a scale set; currently carries only VM size property settings."""

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_hardware_profile"
    # Maps the Azure API JSON keys to this model's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "vm_size_properties": S("vmSizeProperties") >> Bend(AzureVMSizeProperties.mapping)
    }
    vm_size_properties: Optional[AzureVMSizeProperties] = field(default=None, metadata={'description': 'Specifies vm size property settings on the virtual machine.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureSecurityPostureReference:
    """Reference to a security posture applied to all VMs in a scale set,
    with an optional list of extensions excluded from it."""

    kind: ClassVar[str] = "azure_security_posture_reference"
    # Maps the Azure API JSON keys to this model's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "exclude_extensions": S("excludeExtensions") >> ForallBend(AzureVirtualMachineExtension.mapping),
        "id": S("id"),
    }
    exclude_extensions: Optional[List[AzureVirtualMachineExtension]] = field(default=None, metadata={'description': 'List of virtual machine extensions to exclude when applying the security posture.'}) # fmt: skip
    id: Optional[str] = field(default=None, metadata={'description': 'The security posture reference id in the form of /communitygalleries/{communitygalleryname}/securitypostures/{securityposturename}/versions/{major. Minor. Patch}|{major. *}|latest.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetVMProfile:
    """Virtual machine profile shared by all instances of a scale set:
    OS, storage, network, extension, security and spot-related settings."""

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_vm_profile"
    # Maps the Azure API JSON keys to this model's attributes. Nested objects
    # are bent through their own model's mapping; some keys (e.g. billingProfile,
    # serviceArtifactReference) are flattened to a single scalar sub-field.
    mapping: ClassVar[Dict[str, Bender]] = {
        "application_profile": S("applicationProfile") >> Bend(AzureApplicationProfile.mapping),
        "billing_profile": S("billingProfile", "maxPrice"),
        "capacity_reservation": S("capacityReservation") >> Bend(AzureCapacityReservationProfile.mapping),
        "diagnostics_profile": S("diagnosticsProfile") >> Bend(AzureDiagnosticsProfile.mapping),
        "eviction_policy": S("evictionPolicy"),
        "extension_profile": S("extensionProfile") >> Bend(AzureVirtualMachineScaleSetExtensionProfile.mapping),
        "hardware_profile": S("hardwareProfile") >> Bend(AzureVirtualMachineScaleSetHardwareProfile.mapping),
        "license_type": S("licenseType"),
        "network_profile": S("networkProfile") >> Bend(AzureVirtualMachineScaleSetNetworkProfile.mapping),
        "os_profile": S("osProfile") >> Bend(AzureVirtualMachineScaleSetOSProfile.mapping),
        "priority": S("priority"),
        "scheduled_events_profile": S("scheduledEventsProfile") >> Bend(AzureScheduledEventsProfile.mapping),
        "security_posture_reference": S("securityPostureReference") >> Bend(AzureSecurityPostureReference.mapping),
        "security_profile": S("securityProfile") >> Bend(AzureSecurityProfile.mapping),
        "service_artifact_reference": S("serviceArtifactReference", "id"),
        "storage_profile": S("storageProfile") >> Bend(AzureVirtualMachineScaleSetStorageProfile.mapping),
        "user_data": S("userData"),
    }
    application_profile: Optional[AzureApplicationProfile] = field(default=None, metadata={'description': 'Contains the list of gallery applications that should be made available to the vm/vmss.'}) # fmt: skip
    billing_profile: Optional[float] = field(default=None, metadata={'description': 'Specifies the billing related details of a azure spot vm or vmss. Minimum api-version: 2019-03-01.'}) # fmt: skip
    capacity_reservation: Optional[AzureCapacityReservationProfile] = field(default=None, metadata={'description': 'The parameters of a capacity reservation profile.'}) # fmt: skip
    diagnostics_profile: Optional[AzureDiagnosticsProfile] = field(default=None, metadata={'description': 'Specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15.'}) # fmt: skip
    eviction_policy: Optional[str] = field(default=None, metadata={'description': 'Specifies the eviction policy for the azure spot vm/vmss.'}) # fmt: skip
    extension_profile: Optional[AzureVirtualMachineScaleSetExtensionProfile] = field(default=None, metadata={'description': 'Describes a virtual machine scale set extension profile.'}) # fmt: skip
    hardware_profile: Optional[AzureVirtualMachineScaleSetHardwareProfile] = field(default=None, metadata={'description': 'Specifies the hardware settings for the virtual machine scale set.'}) # fmt: skip
    license_type: Optional[str] = field(default=None, metadata={'description': 'Specifies that the image or disk that is being used was licensed on-premises. Possible values for windows server operating system are: windows_client windows_server possible values for linux server operating system are: rhel_byos (for rhel) sles_byos (for suse) for more information, see [azure hybrid use benefit for windows server](https://docs. Microsoft. Com/azure/virtual-machines/windows/hybrid-use-benefit-licensing) [azure hybrid use benefit for linux server](https://docs. Microsoft. Com/azure/virtual-machines/linux/azure-hybrid-benefit-linux) minimum api-version: 2015-06-15.'}) # fmt: skip
    network_profile: Optional[AzureVirtualMachineScaleSetNetworkProfile] = field(default=None, metadata={'description': 'Describes a virtual machine scale set network profile.'}) # fmt: skip
    os_profile: Optional[AzureVirtualMachineScaleSetOSProfile] = field(default=None, metadata={'description': 'Describes a virtual machine scale set os profile.'}) # fmt: skip
    priority: Optional[str] = field(default=None, metadata={'description': 'Specifies the priority for a standalone virtual machine or the virtual machines in the scale set. Low enum will be deprecated in the future, please use spot as the enum to deploy azure spot vm/vmss.'}) # fmt: skip
    scheduled_events_profile: Optional[AzureScheduledEventsProfile] = field(default=None, metadata={"description": ""})
    security_posture_reference: Optional[AzureSecurityPostureReference] = field(default=None, metadata={'description': 'Specifies the security posture to be used for all virtual machines in the scale set. Minimum api-version: 2023-03-01.'}) # fmt: skip
    security_profile: Optional[AzureSecurityProfile] = field(default=None, metadata={'description': 'Specifies the security profile settings for the virtual machine or virtual machine scale set.'}) # fmt: skip
    service_artifact_reference: Optional[str] = field(default=None, metadata={'description': 'Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using latest image version. Minimum api-version: 2022-11-01.'}) # fmt: skip
    storage_profile: Optional[AzureVirtualMachineScaleSetStorageProfile] = field(default=None, metadata={'description': 'Describes a virtual machine scale set storage profile.'}) # fmt: skip
    user_data: Optional[str] = field(default=None, metadata={'description': 'Userdata for the virtual machines in the scale set, which must be base-64 encoded. Customer should not pass any secrets in here. Minimum api-version: 2021-03-01.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureScaleInPolicy:
    """Scale-in policy of a scale set: rule selection plus optional force deletion."""

    kind: ClassVar[str] = "azure_scale_in_policy"
    # Maps the Azure API JSON keys to this model's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {"force_deletion": S("forceDeletion"), "rules": S("rules")}
    force_deletion: Optional[bool] = field(default=None, metadata={'description': 'This property allows you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in. (feature in preview).'}) # fmt: skip
    rules: Optional[List[str]] = field(default=None, metadata={'description': 'The rules to be followed when scaling-in a virtual machine scale set. Possible values are: **default** when a virtual machine scale set is scaled in, the scale set will first be balanced across zones if it is a zonal scale set. Then, it will be balanced across fault domains as far as possible. Within each fault domain, the virtual machines chosen for removal will be the newest ones that are not protected from scale-in. **oldestvm** when a virtual machine scale set is being scaled-in, the oldest virtual machines that are not protected from scale-in will be chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced across zones. Within each zone, the oldest virtual machines that are not protected will be chosen for removal. **newestvm** when a virtual machine scale set is being scaled-in, the newest virtual machines that are not protected from scale-in will be chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced across zones. Within each zone, the newest virtual machines that are not protected will be chosen for removal.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureSpotRestorePolicy:
    """Spot-try-restore settings: whether and for how long evicted spot instances
    may be restored opportunistically."""

    kind: ClassVar[str] = "azure_spot_restore_policy"
    # Maps the Azure API JSON keys to this model's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {"enabled": S("enabled"), "restore_timeout": S("restoreTimeout")}
    enabled: Optional[bool] = field(default=None, metadata={'description': 'Enables the spot-try-restore feature where evicted vmss spot instances will be tried to be restored opportunistically based on capacity availability and pricing constraints.'}) # fmt: skip
    restore_timeout: Optional[str] = field(default=None, metadata={'description': 'Timeout value expressed as an iso 8601 time duration after which the platform will not try to restore the vmss spot instances.'}) # fmt: skip
@define(eq=False, slots=False)
class AzurePriorityMixPolicy:
    """Target split between regular- and spot-priority VMs for a flexible scale set."""

    kind: ClassVar[str] = "azure_priority_mix_policy"
    # Maps the Azure API JSON keys to this model's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "base_regular_priority_count": S("baseRegularPriorityCount"),
        "regular_priority_percentage_above_base": S("regularPriorityPercentageAboveBase"),
    }
    base_regular_priority_count: Optional[int] = field(default=None, metadata={'description': 'The base number of regular priority vms that will be created in this scale set as it scales out.'}) # fmt: skip
    regular_priority_percentage_above_base: Optional[int] = field(default=None, metadata={'description': 'The percentage of vm instances, after the base regular priority count has been reached, that are expected to use regular priority.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSetIdentity:
    """Managed identity attached to a scale set (system-assigned and/or user-assigned)."""

    kind: ClassVar[str] = "azure_virtual_machine_scale_set_identity"
    # Maps the Azure API JSON keys to this model's attributes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "principal_id": S("principalId"),
        "tenant_id": S("tenantId"),
        "type": S("type"),
        "user_assigned_identities": S("userAssignedIdentities"),
    }
    principal_id: Optional[str] = field(default=None, metadata={'description': 'The principal id of virtual machine scale set identity. This property will only be provided for a system assigned identity.'}) # fmt: skip
    tenant_id: Optional[str] = field(default=None, metadata={'description': 'The tenant id associated with the virtual machine scale set. This property will only be provided for a system assigned identity.'}) # fmt: skip
    type: Optional[str] = field(default=None, metadata={'description': 'The type of identity used for the virtual machine scale set. The type systemassigned, userassigned includes both an implicitly created identity and a set of user assigned identities. The type none will remove any identities from the virtual machine scale set.'}) # fmt: skip
    user_assigned_identities: Optional[Dict[str, AzurePrincipalidClientid]] = field(default=None, metadata={'description': 'The list of user identities associated with the virtual machine. The user identity dictionary key references will be arm resource ids in the form: /subscriptions/{subscriptionid}/resourcegroups/{resourcegroupname}/providers/microsoft. Managedidentity/userassignedidentities/{identityname}.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineScaleSet(AzureResource):
    """An Azure Virtual Machine Scale Set, collected via the compute API
    (list call across the subscription, api-version 2023-03-01)."""

    kind: ClassVar[str] = "azure_virtual_machine_scale_set"
    # How to fetch this resource kind from the Azure REST API.
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets",
        path_parameters=["subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    # Maps the Azure API JSON keys to this model's attributes. mtime/atime are
    # not provided by the API and are deliberately set to None via K(None).
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        "ctime": S("properties", "timeCreated"),
        "mtime": K(None),
        "atime": K(None),
        "scale_set_capabilities": S("properties", "additionalCapabilities")
        >> Bend(AzureAdditionalCapabilities.mapping),
        "automatic_repairs_policy": S("properties", "automaticRepairsPolicy")
        >> Bend(AzureAutomaticRepairsPolicy.mapping),
        "constrained_maximum_capacity": S("properties", "constrainedMaximumCapacity"),
        "do_not_run_extensions_on_overprovisioned_v_ms": S("properties", "doNotRunExtensionsOnOverprovisionedVMs"),
        "extended_location": S("extendedLocation") >> Bend(AzureExtendedLocation.mapping),
        "host_group": S("properties", "hostGroup", "id"),
        "scale_set_identity": S("identity") >> Bend(AzureVirtualMachineScaleSetIdentity.mapping),
        "orchestration_mode": S("properties", "orchestrationMode"),
        "overprovision": S("properties", "overprovision"),
        "azure_plan": S("plan") >> Bend(AzurePlan.mapping),
        "platform_fault_domain_count": S("properties", "platformFaultDomainCount"),
        "priority_mix_policy": S("properties", "priorityMixPolicy") >> Bend(AzurePriorityMixPolicy.mapping),
        "provisioning_state": S("properties", "provisioningState"),
        "proximity_placement_group": S("properties", "proximityPlacementGroup", "id"),
        "scale_in_policy": S("properties", "scaleInPolicy") >> Bend(AzureScaleInPolicy.mapping),
        "single_placement_group": S("properties", "singlePlacementGroup"),
        "scale_set_sku": S("sku") >> Bend(AzureSku.mapping),
        "spot_restore_policy": S("properties", "spotRestorePolicy") >> Bend(AzureSpotRestorePolicy.mapping),
        "time_created": S("properties", "timeCreated"),
        "unique_id": S("properties", "uniqueId"),
        "upgrade_policy": S("properties", "upgradePolicy") >> Bend(AzureUpgradePolicy.mapping),
        "virtual_machine_profile": S("properties", "virtualMachineProfile")
        >> Bend(AzureVirtualMachineScaleSetVMProfile.mapping),
        "zone_balance": S("properties", "zoneBalance"),
    }
    scale_set_capabilities: Optional[AzureAdditionalCapabilities] = field(default=None, metadata={'description': 'Enables or disables a capability on the virtual machine or virtual machine scale set.'}) # fmt: skip
    automatic_repairs_policy: Optional[AzureAutomaticRepairsPolicy] = field(default=None, metadata={'description': 'Specifies the configuration parameters for automatic repairs on the virtual machine scale set.'}) # fmt: skip
    constrained_maximum_capacity: Optional[bool] = field(default=None, metadata={'description': 'Optional property which must either be set to true or omitted.'}) # fmt: skip
    do_not_run_extensions_on_overprovisioned_v_ms: Optional[bool] = field(default=None, metadata={'description': 'When overprovision is enabled, extensions are launched only on the requested number of vms which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned vms.'}) # fmt: skip
    extended_location: Optional[AzureExtendedLocation] = field(default=None, metadata={'description': 'The complex type of the extended location.'}) # fmt: skip
    host_group: Optional[str] = field(default=None, metadata={"description": ""})
    scale_set_identity: Optional[AzureVirtualMachineScaleSetIdentity] = field(default=None, metadata={'description': 'Identity for the virtual machine scale set.'}) # fmt: skip
    orchestration_mode: Optional[str] = field(default=None, metadata={'description': 'Specifies the orchestration mode for the virtual machine scale set.'}) # fmt: skip
    overprovision: Optional[bool] = field(default=None, metadata={'description': 'Specifies whether the virtual machine scale set should be overprovisioned.'}) # fmt: skip
    azure_plan: Optional[AzurePlan] = field(default=None, metadata={'description': 'Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an api, you must enable the image for programmatic use. In the azure portal, find the marketplace image that you want to use and then click **want to deploy programmatically, get started ->**. Enter any required information and then click **save**.'}) # fmt: skip
    platform_fault_domain_count: Optional[int] = field(default=None, metadata={'description': 'Fault domain count for each placement group.'}) # fmt: skip
    priority_mix_policy: Optional[AzurePriorityMixPolicy] = field(default=None, metadata={'description': 'Specifies the target splits for spot and regular priority vms within a scale set with flexible orchestration mode. With this property the customer is able to specify the base number of regular priority vms created as the vmss flex instance scales out and the split between spot and regular priority vms after this base target has been reached.'}) # fmt: skip
    provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The provisioning state, which only appears in the response.'}) # fmt: skip
    proximity_placement_group: Optional[str] = field(default=None, metadata={"description": ""})
    scale_in_policy: Optional[AzureScaleInPolicy] = field(default=None, metadata={'description': 'Describes a scale-in policy for a virtual machine scale set.'}) # fmt: skip
    single_placement_group: Optional[bool] = field(default=None, metadata={'description': 'When true this limits the scale set to a single placement group, of max size 100 virtual machines. Note: if singleplacementgroup is true, it may be modified to false. However, if singleplacementgroup is false, it may not be modified to true.'}) # fmt: skip
    scale_set_sku: Optional[AzureSku] = field(default=None, metadata={'description': 'Describes a virtual machine scale set sku. Note: if the new vm sku is not supported on the hardware the scale set is currently on, you need to deallocate the vms in the scale set before you modify the sku name.'}) # fmt: skip
    spot_restore_policy: Optional[AzureSpotRestorePolicy] = field(default=None, metadata={'description': 'Specifies the spot-try-restore properties for the virtual machine scale set. With this property customer can enable or disable automatic restore of the evicted spot vmss vm instances opportunistically based on capacity availability and pricing constraint.'}) # fmt: skip
    time_created: Optional[datetime] = field(default=None, metadata={'description': 'Specifies the time at which the virtual machine scale set resource was created. Minimum api-version: 2021-11-01.'}) # fmt: skip
    unique_id: Optional[str] = field(default=None, metadata={'description': 'Specifies the id which uniquely identifies a virtual machine scale set.'}) # fmt: skip
    upgrade_policy: Optional[AzureUpgradePolicy] = field(default=None, metadata={'description': 'Describes an upgrade policy - automatic, manual, or rolling.'}) # fmt: skip
    virtual_machine_profile: Optional[AzureVirtualMachineScaleSetVMProfile] = field(default=None, metadata={'description': 'Describes a virtual machine scale set virtual machine profile.'}) # fmt: skip
    zone_balance: Optional[bool] = field(default=None, metadata={'description': 'Whether to force strictly even virtual machine distribution cross x-zones in case there is zone outage. Zonebalance property can only be set if the zones property of the scale set contains more than one zone. If there are no zones or only one zone specified, then zonebalance property should not be set.'}) # fmt: skip
@define(eq=False, slots=False)
class AzureVirtualMachineSize(AzureResource):
    """A VM size (SKU dimensions) available in a location, collected via the
    per-location vmSizes compute API (api-version 2023-03-01)."""

    kind: ClassVar[str] = "azure_virtual_machine_size"
    # How to fetch this resource kind from the Azure REST API.
    api_spec: ClassVar[AzureApiSpec] = AzureApiSpec(
        service="compute",
        version="2023-03-01",
        path="/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes",
        path_parameters=["location", "subscriptionId"],
        query_parameters=["api-version"],
        access_path="value",
        expect_array=True,
    )
    # Maps the Azure API JSON keys to this model's attributes. The vmSizes API
    # carries no timestamps, so ctime/mtime/atime are fixed to None via K(None).
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("id"),
        "tags": S("tags", default={}),
        "name": S("name"),
        "ctime": K(None),
        "mtime": K(None),
        "atime": K(None),
        "max_data_disk_count": S("maxDataDiskCount"),
        "memory_in_mb": S("memoryInMB"),
        "number_of_cores": S("numberOfCores"),
        "os_disk_size_in_mb": S("osDiskSizeInMB"),
        "resource_disk_size_in_mb": S("resourceDiskSizeInMB"),
    }
    max_data_disk_count: Optional[int] = field(default=None, metadata={'description': 'The maximum number of data disks that can be attached to the virtual machine size.'}) # fmt: skip
    memory_in_mb: Optional[int] = field(default=None, metadata={'description': 'The amount of memory, in mb, supported by the virtual machine size.'}) # fmt: skip
    number_of_cores: Optional[int] = field(default=None, metadata={'description': 'The number of cores supported by the virtual machine size. For constrained vcpu capable vm sizes, this number represents the total vcpus of quota that the vm uses. For accurate vcpu count, please refer to https://docs. Microsoft. Com/azure/virtual-machines/constrained-vcpu or https://docs. Microsoft. Com/rest/api/compute/resourceskus/list.'}) # fmt: skip
    os_disk_size_in_mb: Optional[int] = field(default=None, metadata={'description': 'The os disk size, in mb, allowed by the virtual machine size.'}) # fmt: skip
    resource_disk_size_in_mb: Optional[int] = field(default=None, metadata={'description': 'The resource disk size, in mb, allowed by the virtual machine size.'}) # fmt: skip
# All Azure compute resource kinds contributed by this module to collection.
# NOTE: extraction residue (a dataset metadata row fused after the closing
# bracket) was removed; the list contents are unchanged.
resources: List[Type[AzureResource]] = [
    AzureAvailabilitySet,
    AzureCapacityReservationGroup,
    AzureCloudService,
    AzureComputeOperationValue,
    AzureDedicatedHostGroup,
    AzureDisk,
    AzureDiskAccess,
    AzureDiskEncryptionSet,
    AzureGallery,
    AzureImage,
    AzureProximityPlacementGroup,
    # AzureResourceSku, TODO: handle resource skus correctly
    AzureRestorePointCollection,
    AzureSnapshot,
    AzureSshPublicKeyResource,
    AzureVirtualMachine,
    AzureVirtualMachineScaleSet,
    AzureVirtualMachineSize,
]
from typing import ClassVar
from attrs import define, field
from resotolib.json import value_in_path
from resotolib.types import Json
# Default plugin configuration: resources of the listed "kinds" that are
# missing any of the listed "tags" for longer than the configured age are
# cleaned up. Ages can be set per cloud/account; "default.age" applies to
# accounts without their own "age" entry.
default_config = {
    "default": {"age": "2h"},
    "tags": ["owner", "expiration"],
    "kinds": [
        "aws_ec2_instance",
        "aws_ec2_volume",
        "aws_vpc",
        "aws_cloudformation_stack",
        "aws_elb",
        "aws_alb",
        "aws_alb_target_group",
        "aws_eks_cluster",
        "aws_eks_nodegroup",
        "example_instance",
        "example_network",
    ],
    "accounts": {
        "aws": {
            "068564737731": {"name": "playground", "age": "7d"},
            "575584959047": {
                "name": "eng-sre",
            },
        },
        "example": {
            "Example Account": {
                "name": "Example Account",
            }
        },
    },
}
@define
class CleanupUntaggedConfig:
    """Config schema for the cleanup-untagged plugin."""

    kind: ClassVar[str] = "plugin_cleanup_untagged"
    enabled: bool = field(
        default=False,
        metadata={"description": "Enable plugin?", "restart_required": True},
    )
    config: Json = field(
        factory=lambda: default_config,
        metadata={
            "description": (
                "Configuration for the plugin\n"
                "See https://github.com/someengineering/resoto/tree/main/plugins/cleanup_untagged for syntax details"
            )
        },
    )

    @staticmethod
    def validate(cfg: "CleanupUntaggedConfig") -> bool:
        """Validate the plugin configuration.

        Checks that the required sections exist and are non-empty, and that
        every account has a name and an age (falling back to the default age).
        Accounts without an explicit age are updated in place with the default.

        Raises:
            ValueError: if any required section or field is missing/invalid.

        Returns:
            True if the configuration is valid.
        """
        config = cfg.config
        for section in ("tags", "kinds", "accounts"):
            if section not in config:
                raise ValueError(f"Section '{section}' not found in config")
        if not isinstance(config["tags"], list) or len(config["tags"]) == 0:
            raise ValueError("Error in 'tags' section")
        if not isinstance(config["kinds"], list) or len(config["kinds"]) == 0:
            raise ValueError("Error in 'kinds' section")
        if not isinstance(config["accounts"], dict) or len(config["accounts"]) == 0:
            raise ValueError("Error in 'accounts' section")
        maybe_default_age = value_in_path(config, ["default", "age"])
        for cloud_id, account in config["accounts"].items():
            for account_id, account_data in account.items():
                if "name" not in account_data:
                    # fixed: error message was missing its closing quote
                    raise ValueError(f"Missing 'name' for account '{cloud_id}/{account_id}'")
                # accounts with an explicit age keep it; otherwise fall back to
                # the default age (the former self-assignment no-op was removed)
                if "age" not in account_data:
                    if maybe_default_age is None:
                        # fixed: error message had a stray trailing quote
                        raise ValueError(
                            f"Missing 'age' for account '{cloud_id}/{account_id}' and no default age defined"
                        )
                    account_data["age"] = maybe_default_age
        return True
import logging
from datetime import datetime
from typing import Union, Callable, Any, Dict, Optional, Tuple, List
log = logging.getLogger("resoto." + __name__)
def get_result_data(result: Dict[str, Any], value: Union[str, Callable[..., Any]]) -> Any:
    """Extract a piece of data from a DigitalOcean API call result dict.

    Args:
        result: dict containing the API call result.
        value: either a key into ``result``, or a callable (e.g. a lambda)
            that pulls the relevant data out of ``result``.

    Returns:
        The extracted data, or None if the key is absent or the callable raised.
    """
    if callable(value):
        try:
            return value(result)
        except Exception:
            log.exception(f"Exception while trying to fetch data calling {value}")
            return None
    return result[value] if value in result else None
class RetryableHttpError(Exception):
    """Marker exception for HTTP failures that are safe to retry."""


def retry_on_error(e: Any) -> bool:
    """Retry predicate: retry only when the error is a RetryableHttpError."""
    if not isinstance(e, RetryableHttpError):
        return False
    log.info(f"Got a retryable error {e} - retrying")
    return True
def iso2datetime(ts: Optional[str]) -> Optional[datetime]:
    """Parse an ISO-8601 timestamp string, tolerating a trailing 'Z' (UTC).

    ``datetime.fromisoformat`` does not accept the 'Z' suffix before
    Python 3.11, so it is rewritten to '+00:00' first.

    Returns:
        The parsed (timezone-aware, when an offset is present) datetime,
        or None when ts is None.
    """
    if ts is None:
        return None
    if ts.endswith("Z"):
        ts = ts[:-1] + "+00:00"
    # fixed: removed the redundant second None-check and the unreachable
    # implicit-None fallthrough; all paths now return explicitly
    return datetime.fromisoformat(ts)
def _do_id(kind: str, value: object) -> str:
    """Build a DigitalOcean URN of the form ``do:<kind>:<value>``."""
    return f"do:{kind}:{value}"


def region_id(slug: str) -> str:
    return _do_id("region", slug)


def project_id(value: str) -> str:
    return _do_id("project", value)


def droplet_id(value: int) -> str:
    return _do_id("droplet", value)


def kubernetes_id(value: str) -> str:
    return _do_id("kubernetes", value)


def volume_id(value: int) -> str:
    return _do_id("volume", value)


def vpc_id(value: str) -> str:
    return _do_id("vpc", value)


def snapshot_id(value: int) -> str:
    return _do_id("snapshot", value)


def loadbalancer_id(value: int) -> str:
    return _do_id("loadbalancer", value)


def floatingip_id(value: str) -> str:
    return _do_id("floatingip", value)


def database_id(value: str) -> str:
    return _do_id("dbaas", value)


def image_id(value: str) -> str:
    return _do_id("image", value)


def size_id(value: str) -> str:
    return _do_id("size", value)


def space_id(value: str) -> str:
    return _do_id("space", value)


def app_id(value: str) -> str:
    return _do_id("app", value)


def cdn_endpoint_id(value: str) -> str:
    return _do_id("cdn_endpoint", value)


def certificate_id(value: str) -> str:
    return _do_id("certificate", value)


def container_registry_id(value: str) -> str:
    return _do_id("cr", value)


def container_registry_repository_id(registry_id: str, repository_id: str) -> str:
    return _do_id("crr", f"{registry_id}/{repository_id}")


def container_registry_repository_tag_id(registry_id: str, repository_id: str, tag: str) -> str:
    return _do_id("crrt", f"{registry_id}/{repository_id}:{tag}")


def ssh_key_id(value: str) -> str:
    return _do_id("ssh_key", value)


def tag_id(value: str) -> str:
    return _do_id("tag", value)


def domain_id(value: str) -> str:
    return _do_id("domain", value)


def domain_record_id(value: str) -> str:
    return _do_id("domain_record", value)


def firewall_id(value: str) -> str:
    return _do_id("firewall", value)


def alert_policy_id(value: str) -> str:
    return _do_id("alert", value)


def droplet_neighborhood_id(value: str) -> str:
    return _do_id("neighborhood", value)
tag_value_sep: str = "--"


def parse_tag(tag: str) -> Optional[Tuple[str, Optional[str]]]:
    """Split a DigitalOcean tag of the form ``key--value`` into ``(key, value)``.

    Tags without the separator yield ``(tag, None)``; only the first
    separator is significant, so values may themselves contain ``--``.
    """
    parts = tag.split(tag_value_sep, 1)
    return (parts[0], parts[1] if len(parts) > 1 else None)


def parse_tags(tags: List[str]) -> Dict[str, Optional[str]]:
    """Parse a list of DigitalOcean tags into a key -> value mapping."""
    return {pair[0]: pair[1] for raw in tags if (pair := parse_tag(raw))}


def dump_tag(key: str, value: Optional[str]) -> str:
    """Serialize a key/value pair into DigitalOcean tag form (``key--value``).

    Empty or missing values produce just the key.
    """
    return f"{key}{tag_value_sep}{value}" if value else key
from typing import Optional, List, Tuple, cast, Any
from resoto_plugin_digitalocean.client import StreamingWrapper, get_team_credentials
from resoto_plugin_digitalocean.collector import DigitalOceanTeamCollector
from resoto_plugin_digitalocean.resources import DigitalOceanResource, DigitalOceanTeam
from resoto_plugin_digitalocean.config import (
DigitalOceanCollectorConfig,
DigitalOceanTeamCredentials,
DigitalOceanSpacesKeys,
)
from resoto_plugin_digitalocean.utils import dump_tag
from resotolib.config import Config
from resotolib.baseplugin import BaseCollectorPlugin
from resotolib.core.actions import CoreFeedback
from resotolib.logger import log
from resotolib.graph import Graph
from resotolib.baseresources import BaseResource
import time
class DigitalOceanCollectorPlugin(BaseCollectorPlugin):
cloud = "digitalocean"
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Set by the outer collector machinery before collect() is invoked.
        self.core_feedback: Optional[CoreFeedback] = None
    def collect(self) -> None:
        """This method is being called by resoto whenever the collector runs
        It is responsible for querying the cloud APIs for remote resources and adding
        them to the plugin graph.
        The graph root (self.graph.root) must always be followed by one or more
        accounts. An account must always be followed by a region.
        A region can contain arbitrary resources.
        """
        assert self.core_feedback, "core_feedback is not set" # will be set by the outer collector plugin

        def from_legacy_config() -> List[DigitalOceanTeamCredentials]:
            # Legacy config shape: parallel lists of API tokens and
            # "access_key:secret_key" strings; token i pairs with key i.
            tokens: List[str] = Config.digitalocean.api_tokens
            spaces_access_keys: List[str] = Config.digitalocean.spaces_access_keys
            spaces_keys: List[Tuple[Optional[str], Optional[str]]] = []

            def spaces_keys_valid(keys: List[str]) -> bool:
                # every entry must be exactly "access_key:secret_key"
                return all([len(key.split(":")) == 2 for key in keys])

            if not spaces_keys_valid(spaces_access_keys):
                log.warning("DigitalOcean Spaces access keys must be provided in pairs of access_key:secret_key")
            else:

                def key_to_tuple(key: str) -> Tuple[str, str]:
                    splitted = key.split(":")
                    return splitted[0], splitted[1]

                spaces_keys = [key_to_tuple(key) for key in spaces_access_keys]
            if len(tokens) != len(spaces_access_keys):
                log.warning(
                    "The number of DigitalOcean API tokens and DigitalOcean Spaces access keys must be equal."
                    + "Missing or extra spaces access keys will be ignored."
                )
                # truncate extra keys, then pad missing ones with (None, None)
                spaces_keys = spaces_keys[: len(tokens)]
                spaces_keys.extend([(None, None)] * (len(tokens) - len(spaces_keys)))
            result = []
            for token, space_key_tuple in zip(tokens, spaces_keys):
                if (access_key := space_key_tuple[0]) and (secret_key := space_key_tuple[1]):
                    keys = DigitalOceanSpacesKeys(access_key=access_key, secret_key=secret_key)
                else:
                    keys = None
                result.append(DigitalOceanTeamCredentials(api_token=token, spaces_keys=keys))
            return result

        # Prefer the structured credentials config; fall back to legacy lists.
        if credentials_conf := Config.digitalocean.credentials:
            credentials = cast(List[DigitalOceanTeamCredentials], credentials_conf)
        else:
            credentials = from_legacy_config()
        log.info(f"plugin: collecting DigitalOcean resources for {len(credentials)} teams")
        for c in credentials:
            client = StreamingWrapper(
                c.api_token,
                c.spaces_keys.access_key if c.spaces_keys else None,
                c.spaces_keys.secret_key if c.spaces_keys else None,
            )
            # collect each team into its own graph and ship it to the core
            team_graph = self.collect_team(client, self.core_feedback.with_context("digitalocean"))
            if team_graph:
                self.send_account_graph(team_graph)
def collect_team(self, client: StreamingWrapper, feedback: CoreFeedback) -> Optional[Graph]:
    """Collects an individual team.

    Returns the collected team graph, or None if collection failed.
    A failing team must not abort collection of the remaining teams.
    """
    team_id = client.get_team_id()
    team = DigitalOceanTeam(id=team_id, tags={}, urn=f"do:team:{team_id}")
    try:
        # report 0 of 1 steps done so the team shows up as in-progress right away
        feedback.progress_done(team_id, 0, 1)
        team_feedback = feedback.with_context("digitalocean", client.get_team_id())
        dopc = DigitalOceanTeamCollector(team, client.with_feedback(team_feedback))
        dopc.collect()
        feedback.progress_done(team_id, 1, 1)
    except Exception:
        # swallow and log: returning None signals failure to the caller
        log.exception(f"An unhandled error occurred while collecting team {team_id}")
        return None
    else:
        return dopc.graph
@staticmethod
def add_config(config: Config) -> None:
    """Called by resoto on startup to register this plugin's config section."""
    config.add_config(DigitalOceanCollectorConfig)
@staticmethod
def update_tag(config: Config, resource: BaseResource, key: str, value: str) -> bool:
    """Create or update tag `key` with `value` on a DigitalOcean resource.

    Returns True on success. Raises RuntimeError when credentials are missing
    or the tag-count lookup fails irrecoverably, and NotImplementedError when
    the resource kind does not support tagging.
    """
    assert isinstance(resource, DigitalOceanResource)
    tag_resource_name = resource.tag_resource_name()
    if tag_resource_name:
        log.debug(f"Updating tag {key} on resource {resource.id}")
        team = resource.account()
        # credentials lookup is keyed by a 10-minute time bucket
        # (presumably to bound a cache lifetime — TODO confirm in get_team_credentials)
        ten_minutes_bucket = int(time.time()) // 600
        credentials = get_team_credentials(team.id, ten_minutes_bucket)
        if credentials is None:
            raise RuntimeError(
                f"Cannot update tag on resource {resource.id}, credentials not found for team {team.id}"
            )
        client = StreamingWrapper(
            credentials.api_token,
            credentials.spaces_access_key,
            credentials.spaces_secret_key,
        )
        if key in resource.tags:
            # resotocore knows about the tag. Therefore we need to clean it first
            tag_key = dump_tag(key, resource.tags.get(key))
            client.untag_resource(tag_key, tag_resource_name, resource.id)
        # we tag the resource using the key-value formatted tag
        tag_kv = dump_tag(key, value)
        tag_ready: bool = True
        tag_count = client.get_tag_count(tag_kv)
        # tag count call failed irrecoverably, we can't continue
        if isinstance(tag_count, str):
            raise RuntimeError(f"Tag update failed. Reason: {tag_count}")
        # tag does not exist, create it
        if tag_count is None:
            tag_ready = client.create_tag(tag_kv)
        return tag_ready and client.tag_resource(tag_kv, tag_resource_name, resource.id)
    else:
        raise NotImplementedError(f"resource {resource.kind} does not support tagging")
@staticmethod
def delete_tag(config: Config, resource: BaseResource, key: str) -> bool:
    """Remove tag `key` from a DigitalOcean resource.

    Returns True on success and False when the tag does not exist on the
    resource or could not be untagged. Raises RuntimeError when credentials
    are missing, and NotImplementedError when the resource kind does not
    support tagging.
    """
    assert isinstance(resource, DigitalOceanResource)
    tag_resource_name = resource.tag_resource_name()
    if tag_resource_name:
        log.debug(f"Deleting tag {key} on resource {resource.id}")
        team = resource.account()
        # credentials lookup is keyed by a 10-minute time bucket
        ten_minutes_bucket = int(time.time()) // 600
        credentials = get_team_credentials(team.id, ten_minutes_bucket)
        if credentials is None:
            # fixed copy-paste: this is the delete path, not the update path
            raise RuntimeError(
                f"Cannot delete tag on resource {resource.id}, credentials not found for team {team.id}"
            )
        client = StreamingWrapper(
            credentials.api_token,
            credentials.spaces_access_key,
            credentials.spaces_secret_key,
        )
        if key not in resource.tags:
            # tag does not exist, nothing to do
            return False
        tag_key = dump_tag(key, resource.tags.get(key))
        untagged = client.untag_resource(tag_key, tag_resource_name, resource.id)
        if not untagged:
            return False
        tag_count = client.get_tag_count(tag_key)
        # last usage removed: delete the tag object itself
        if tag_count == 0:
            return client.delete("/tags", tag_key)
        return True
    else:
        raise NotImplementedError(f"resource {resource.kind} does not support tagging")
import logging
from attrs import define
from typing import ClassVar, Dict, List, Optional
from resoto_plugin_digitalocean.client import StreamingWrapper
from resoto_plugin_digitalocean.client import get_team_credentials
from resotolib.baseresources import (
BaseAccount,
BaseDatabase,
BaseInstance,
BaseIPAddress,
BaseInstanceType,
BaseLoadBalancer,
BaseNetwork,
BaseRegion,
BaseResource,
BaseSnapshot,
BaseVolume,
VolumeStatus,
BaseBucket,
BaseEndpoint,
BaseCertificate,
BaseKeyPair,
BaseDNSZone,
BaseDNSRecord,
ModelReference,
PhantomBaseResource,
)
from resotolib.graph import Graph
import time
from resotolib.types import Json
from resotolib.json import to_json as _to_json
log = logging.getLogger("resoto." + __name__)
@define(eq=False, slots=False)
class DigitalOceanResource(BaseResource):
    """A class that implements the abstract method delete() as well as update_tag()
    and delete_tag().
    delete() must be implemented. update_tag() and delete_tag() are optional.
    """

    kind: ClassVar[str] = "digitalocean_resource"
    # DigitalOcean uniform resource name, e.g. "do:team:1234"
    urn: str = ""

    def delete_uri_path(self) -> Optional[str]:
        # API path used by delete(); None means deletion is not supported
        return None

    def tag_resource_name(self) -> Optional[str]:
        """Resource name in case tagging is supported by digitalocean.
        Not all resources support tagging.
        """
        return None

    def delete(self, graph: Graph) -> bool:
        """Delete a resource in the cloud"""
        delete_uri_path = self.delete_uri_path()
        if delete_uri_path:
            log.debug(f"Deleting resource {self.id} in account {self.account(graph).id} region {self.region(graph).id}")
            team = self.account(graph)
            # credentials lookup is keyed by a 10-minute time bucket
            ten_minutes_bucket = int(time.time()) // 600
            credentials = get_team_credentials(team.id, ten_minutes_bucket)
            if credentials is None:
                raise RuntimeError(f"Cannot delete resource {self.id}, credentials not found for team {team.id}")
            client = StreamingWrapper(
                credentials.api_token,
                credentials.spaces_access_key,
                credentials.spaces_secret_key,
            )
            return client.delete(delete_uri_path, self.id)
        # subclasses without a delete_uri_path must override delete() themselves
        raise NotImplementedError

    def to_json(self) -> Json:
        # strip null attributes but pass "tags" through untouched
        return _to_json(self, strip_nulls=True, keep_untouched=set(["tags"]))
@define(eq=False, slots=False)
class DigitalOceanTeam(DigitalOceanResource, BaseAccount):
    """DigitalOcean Team"""

    kind: ClassVar[str] = "digitalocean_team"
    # graph model: resource kinds a team node may have edges to
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [
                "digitalocean_alert_policy",
                "digitalocean_app",
                "digitalocean_cdn_endpoint",
                "digitalocean_certificate",
                "digitalocean_container_registry",
                "digitalocean_container_registry_repository",
                "digitalocean_container_registry_repository_tag",
                "digitalocean_database",
                "digitalocean_domain",
                "digitalocean_domain_record",
                "digitalocean_droplet",
                "digitalocean_firewall",
                "digitalocean_floating_ip",
                "digitalocean_image",
                "digitalocean_kubernetes_cluster",
                "digitalocean_load_balancer",
                "digitalocean_vpc",
                "digitalocean_project",
                "digitalocean_region",
                "digitalocean_resource",
                "digitalocean_snapshot",
                "digitalocean_space",
                "digitalocean_ssh_key",
                "digitalocean_volume",
            ],
            "delete": [],
        }
    }
@define(eq=False, slots=False)
class DigitalOceanRegion(DigitalOceanResource, BaseRegion):
    """DigitalOcean region"""

    kind: ClassVar[str] = "digitalocean_region"
    # graph model: resource kinds a region node may have edges to
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [
                "digitalocean_app",
                "digitalocean_container_registry",
                "digitalocean_database",
                "digitalocean_droplet",
                "digitalocean_floating_ip",
                "digitalocean_image",
                "digitalocean_kubernetes_cluster",
                "digitalocean_load_balancer",
                "digitalocean_vpc",
                "digitalocean_snapshot",
                "digitalocean_space",
            ],
            "delete": [],
        }
    }
    # region slug as reported by the API (e.g. "fra1" — TODO confirm)
    do_region_slug: Optional[str] = None
    do_region_features: Optional[List[str]] = None
    is_available: Optional[bool] = None
    do_region_droplet_sizes: Optional[List[str]] = None
@define(eq=False, slots=False)
class DigitalOceanProject(DigitalOceanResource, BaseResource):
    """DigitalOcean project"""

    kind: ClassVar[str] = "digitalocean_project"
    # graph model: same kinds for default and delete edges
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [
                "digitalocean_database",
                "digitalocean_domain",
                "digitalocean_droplet",
                "digitalocean_floating_ip",
                "digitalocean_kubernetes_cluster",
                "digitalocean_load_balancer",
                "digitalocean_space",
                "digitalocean_volume",
            ],
            "delete": [
                "digitalocean_database",
                "digitalocean_domain",
                "digitalocean_droplet",
                "digitalocean_floating_ip",
                "digitalocean_kubernetes_cluster",
                "digitalocean_load_balancer",
                "digitalocean_space",
                "digitalocean_volume",
            ],
        }
    }
    owner_uuid: Optional[str] = None
    owner_id: Optional[str] = None
    description: Optional[str] = None
    purpose: Optional[str] = None
    environment: Optional[str] = None
    is_default: Optional[bool] = None

    def delete_uri_path(self) -> Optional[str]:
        # enables the generic DigitalOceanResource.delete() via DELETE /projects/{id}
        return "/projects"
@define(eq=False, slots=False)
class DigitalOceanDropletSize(DigitalOceanResource, BaseInstanceType):
    """DigitalOcean droplet size (instance type)."""

    kind: ClassVar[str] = "digitalocean_droplet_size"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [
                "digitalocean_droplet",
            ]
        }
    }
@define(eq=False, slots=False)
class DigitalOceanDroplet(DigitalOceanResource, BaseInstance):
    """A DigitalOcean Droplet Resource
    Droplet have a class variable `instance_status_map` which contains
    a mapping from the droplet status string the cloud API returns
    to our internal InstanceStatus state.
    """

    kind: ClassVar[str] = "digitalocean_droplet"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [
                "digitalocean_floating_ip",
                "digitalocean_snapshot",
                "digitalocean_volume",
            ],
            "delete": [],
        }
    }
    droplet_backup_ids: Optional[List[str]] = None
    is_locked: Optional[bool] = None
    droplet_features: Optional[List[str]] = None
    droplet_image: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/droplets"

    def tag_resource_name(self) -> Optional[str]:
        # resource type name used by the DigitalOcean tagging API
        return "droplet"
@define(eq=False, slots=False)
class DigitalOceanDropletNeighborhood(DigitalOceanResource, PhantomBaseResource):
    """A DigitalOcean Droplet Neighborhood Resource
    Represents a physical hardware server where droplets can be placed.
    """

    kind: ClassVar[str] = "digitalocean_droplet_neighborhood"
    # ids of droplets co-located on this physical server
    droplets: Optional[List[str]] = None
@define(eq=False, slots=False)
class DigitalOceanKubernetesCluster(DigitalOceanResource, BaseResource):
    """DigitalOcean Kubernetes Cluster"""

    kind: ClassVar[str] = "digitalocean_kubernetes_cluster"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["digitalocean_droplet"],
            "delete": [],
        }
    }
    k8s_version: Optional[str] = None
    k8s_cluster_subnet: Optional[str] = None
    k8s_service_subnet: Optional[str] = None
    ipv4_address: Optional[str] = None
    endpoint: Optional[str] = None
    auto_upgrade_enabled: Optional[bool] = None
    cluster_status: Optional[str] = None
    surge_upgrade_enabled: Optional[bool] = None
    registry_enabled: Optional[bool] = None
    ha_enabled: Optional[bool] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/kubernetes/clusters"
@define(eq=False, slots=False)
class DigitalOceanVolume(DigitalOceanResource, BaseVolume):
    """DigitalOcean block storage volume."""

    kind: ClassVar[str] = "digitalocean_volume"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["digitalocean_snapshot"],
            "delete": ["digitalocean_droplet"],
        }
    }
    # maps volume status strings from the cloud API to resoto's VolumeStatus
    volume_status_map: ClassVar[Dict[str, VolumeStatus]] = {
        "creating": VolumeStatus.BUSY,
        "available": VolumeStatus.AVAILABLE,
        "in-use": VolumeStatus.IN_USE,
        "deleting": VolumeStatus.BUSY,
        "deleted": VolumeStatus.DELETED,
        "error": VolumeStatus.ERROR,
        "busy": VolumeStatus.BUSY,
    }
    description: Optional[str] = None
    filesystem_type: Optional[str] = None
    filesystem_label: Optional[str] = None
    ondemand_cost: Optional[float] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/volumes"

    def tag_resource_name(self) -> Optional[str]:
        # resource type name used by the DigitalOcean tagging API
        return "volume"
@define(eq=False, slots=False)
class DigitalOceanDatabase(DigitalOceanResource, BaseDatabase):
    """DigitalOcean managed database cluster."""

    kind: ClassVar[str] = "digitalocean_database"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["digitalocean_app"],
            "delete": [],
        }
    }

    def delete_uri_path(self) -> Optional[str]:
        return "/databases"

    def tag_resource_name(self) -> Optional[str]:
        # resource type name used by the DigitalOcean tagging API
        return "database"
@define(eq=False, slots=False)
class DigitalOceanVPC(DigitalOceanResource, BaseNetwork):
    """DigitalOcean network
    This is what instances and other networking related resources might reside in.
    """

    kind: ClassVar[str] = "digitalocean_vpc"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [
                "digitalocean_load_balancer",
                "digitalocean_kubernetes_cluster",
                "digitalocean_droplet",
                "digitalocean_database",
            ],
            "delete": [
                "digitalocean_database",
                "digitalocean_droplet",
                "digitalocean_kubernetes_cluster",
            ],
        }
    }
    ip_range: Optional[str] = None
    description: Optional[str] = None
    is_default: Optional[bool] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/vpcs"
@define(eq=False, slots=False)
class DigitalOceanSnapshot(DigitalOceanResource, BaseSnapshot):
    """DigitalOcean snapshot"""

    kind: ClassVar[str] = "digitalocean_snapshot"
    snapshot_size_gigabytes: Optional[int] = None
    # id and type of the resource this snapshot was taken from
    resource_id: Optional[str] = None
    resource_type: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/snapshots"

    def tag_resource_name(self) -> Optional[str]:
        # resource type name used by the DigitalOcean tagging API
        return "volume_snapshot"
@define(eq=False, slots=False)
class DigitalOceanLoadBalancer(DigitalOceanResource, BaseLoadBalancer):
    """DigitalOcean load balancer"""

    kind: ClassVar[str] = "digitalocean_load_balancer"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["digitalocean_droplet"],
            "delete": [],
        }
    }
    nr_nodes: Optional[int] = None
    loadbalancer_status: Optional[str] = None
    redirect_http_to_https: Optional[bool] = None
    enable_proxy_protocol: Optional[bool] = None
    enable_backend_keepalive: Optional[bool] = None
    disable_lets_encrypt_dns_records: Optional[bool] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/load_balancers"
@define(eq=False, slots=False)
class DigitalOceanFloatingIP(DigitalOceanResource, BaseIPAddress):
    """DigitalOcean floating IP"""

    kind: ClassVar[str] = "digitalocean_floating_ip"
    is_locked: Optional[bool] = None

    def delete(self, graph: Graph) -> bool:
        """Delete the floating IP, un-assigning it from any droplet first.

        Overrides the generic delete() because an assigned IP needs to be
        released before the delete call.
        """
        log.debug(f"Deleting resource {self.id} in account {self.account(graph).id} region {self.region(graph).id}")
        team = self.account(graph)
        # credentials lookup is keyed by a 10-minute time bucket
        ten_minutes_bucket = int(time.time()) // 600
        credentials = get_team_credentials(team.id, ten_minutes_bucket)
        if credentials is None:
            raise RuntimeError(f"Cannot delete resource {self.id}, credentials not found for team {team.id}")
        client = StreamingWrapper(
            credentials.api_token,
            credentials.spaces_access_key,
            credentials.spaces_secret_key,
        )
        # un-assign the ip just in case it's still assigned to a droplet
        client.unassign_floating_ip(self.id)
        return client.delete("/floating_ips", self.id)
@define(eq=False, slots=False)
class DigitalOceanImage(DigitalOceanResource, BaseResource):
    """DigitalOcean image"""

    kind: ClassVar[str] = "digitalocean_image"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["digitalocean_droplet"],
            "delete": [],
        }
    }
    distribution: Optional[str] = None
    image_slug: Optional[str] = None
    is_public: Optional[bool] = None
    min_disk_size: Optional[int] = None
    image_type: Optional[str] = None
    size_gigabytes: Optional[int] = None
    description: Optional[str] = None
    image_status: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/images"

    def tag_resource_name(self) -> Optional[str]:
        # resource type name used by the DigitalOcean tagging API
        return "image"
@define(eq=False, slots=False)
class DigitalOceanSpace(DigitalOceanResource, BaseBucket):
    """DigitalOcean space"""

    kind: ClassVar[str] = "digitalocean_space"

    def delete(self, graph: Graph) -> bool:
        """Delete the space (object storage bucket).

        Overrides the generic delete() because spaces are deleted through the
        S3-compatible API (delete_space) rather than the DigitalOcean REST API.
        """
        log.debug(f"Deleting space {self.id} in account {self.account(graph).id} region {self.region(graph).id}")
        team = self.account(graph)
        # credentials lookup is keyed by a 10-minute time bucket
        ten_minutes_bucket = int(time.time()) // 600
        credentials = get_team_credentials(team.id, ten_minutes_bucket)
        if credentials is None:
            raise RuntimeError(f"Cannot delete resource {self.id}, credentials not found for team {team.id}")
        client = StreamingWrapper(
            credentials.api_token,
            credentials.spaces_access_key,
            credentials.spaces_secret_key,
        )
        return client.delete_space(self.region(graph).id, self.id)
@define(eq=False, slots=False)
class DigitalOceanApp(DigitalOceanResource, BaseResource):
    """DigitalOcean app"""

    kind: ClassVar[str] = "digitalocean_app"
    tier_slug: Optional[str] = None
    default_ingress: Optional[str] = None
    live_url: Optional[str] = None
    live_url_base: Optional[str] = None
    live_domain: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/apps"
@define(eq=False, slots=False)
class DigitalOceanCdnEndpoint(DigitalOceanResource, BaseEndpoint):
    """DigitalOcean CDN endpoint"""

    # annotated as ClassVar for consistency with the other classes in this file
    kind: ClassVar[str] = "digitalocean_cdn_endpoint"
    origin: Optional[str] = None
    endpoint: Optional[str] = None
    certificate_id: Optional[str] = None
    custom_domain: Optional[str] = None
    ttl: Optional[int] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/cdn/endpoints"
@define(eq=False, slots=False)
class DigitalOceanCertificate(DigitalOceanResource, BaseCertificate):
    """DigitalOcean certificate"""

    # annotated as ClassVar for consistency with the other classes in this file
    kind: ClassVar[str] = "digitalocean_certificate"
    certificate_state: Optional[str] = None
    certificate_type: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/certificates"
@define(eq=False, slots=False)
class DigitalOceanContainerRegistry(DigitalOceanResource, BaseResource):
    """DigitalOcean container registry"""

    # annotated as ClassVar for consistency with the other classes in this file
    kind: ClassVar[str] = "digitalocean_container_registry"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["digitalocean_container_registry_repository"],
            "delete": [],
        }
    }
    storage_usage_bytes: Optional[int] = None
    is_read_only: Optional[bool] = None

    def delete(self, graph: Graph) -> bool:
        """Delete the container registry from the cloud"""
        log.debug(f"Deleting registry {self.id} in account {self.account(graph).id} region {self.region(graph).id}")
        team = self.account(graph)
        # credentials lookup is keyed by a 10-minute time bucket
        ten_minutes_bucket = int(time.time()) // 600
        credentials = get_team_credentials(team.id, ten_minutes_bucket)
        if credentials is None:
            raise RuntimeError(f"Cannot delete resource {self.id}, credentials not found for team {team.id}")
        client = StreamingWrapper(
            credentials.api_token,
            credentials.spaces_access_key,
            credentials.spaces_secret_key,
        )
        # there is a single registry per team, so no resource id is needed
        return client.delete("/registry", None)
@define(eq=False, slots=False)
class DigitalOceanContainerRegistryRepository(DigitalOceanResource, BaseResource):
    """DigitalOcean container registry repository"""

    # annotated as ClassVar for consistency with the other classes in this file
    kind: ClassVar[str] = "digitalocean_container_registry_repository"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["digitalocean_container_registry_repository_tag"],
            "delete": [],
        }
    }
    tag_count: Optional[int] = None
    manifest_count: Optional[int] = None
@define(eq=False, slots=False)
class DigitalOceanContainerRegistryRepositoryTag(DigitalOceanResource, BaseResource):
    """DigitalOcean container registry repository tag"""

    # annotated as ClassVar for consistency with the other classes in this file
    kind: ClassVar[str] = "digitalocean_container_registry_repository_tag"
    registry_name: Optional[str] = None
    repository_name: Optional[str] = None
    manifest_digest: Optional[str] = None
    compressed_size_bytes: Optional[int] = None
    size_bytes: Optional[int] = None

    def delete_uri_path(self) -> Optional[str]:
        # the delete path is parameterized with this tag's registry and repository
        return f"/registry/{self.registry_name}/repositories/{self.repository_name}/tags"
@define(eq=False, slots=False)
class DigitalOceanSSHKey(DigitalOceanResource, BaseKeyPair):
    """DigitalOcean ssh key"""

    # annotated as ClassVar for consistency with the other classes in this file
    kind: ClassVar[str] = "digitalocean_ssh_key"
    public_key: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/account/keys"
@define(eq=False, slots=False)
class DigitalOceanDomain(DigitalOceanResource, BaseDNSZone):
    """DigitalOcean domain"""

    # annotated as ClassVar for consistency with the other classes in this file
    kind: ClassVar[str] = "digitalocean_domain"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["digitalocean_domain_record"],
            "delete": [],
        }
    }
    ttl: Optional[int] = None
    zone_file: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/domains"
@define(eq=False, slots=False)
class DigitalOceanDomainRecord(DigitalOceanResource, BaseDNSRecord):
    """DigitalOcean domain record"""

    # annotated as ClassVar for consistency with the other classes in this file
    kind: ClassVar[str] = "digitalocean_domain_record"
    domain_name: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        # the delete path is parameterized with the record's parent domain
        return f"/domains/{self.domain_name}/records"
@define(eq=False, slots=False)
class DigitalOceanFirewall(DigitalOceanResource, BaseResource):
    """DigitalOcean firewall"""

    # annotated as ClassVar for consistency with the other classes in this file
    kind: ClassVar[str] = "digitalocean_firewall"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["digitalocean_droplet"],
            "delete": [],
        }
    }
    firewall_status: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/firewalls"
@define(eq=False, slots=False)
class DigitalOceanAlertPolicy(DigitalOceanResource, BaseResource):
    """DigitalOcean alert policy"""

    # annotated as ClassVar for consistency with the other classes in this file
    kind: ClassVar[str] = "digitalocean_alert_policy"
    policy_type: Optional[str] = None
    description: Optional[str] = None
    is_enabled: Optional[bool] = None

    def delete_uri_path(self) -> Optional[str]:
        return "/monitoring/alerts"
from datetime import datetime
from attrs import define
from typing import Optional, ClassVar, List, Dict
from resotolib.graph import Graph
from resotolib.baseresources import (
BaseAccount,
BaseResource,
)
@define(eq=False, slots=False)
class DockerHubResource:
    """Base class for all Docker Hub resources.

    Docker Hub resources are read-only from resoto's point of view:
    delete and tag operations always report failure.
    """

    kind: ClassVar[str] = "dockerhub_resource"

    def delete(self, graph: Graph) -> bool:
        # deletion via resoto is not supported for Docker Hub resources
        return False

    def update_tag(self, key: str, value: str) -> bool:
        # tagging via resoto is not supported for Docker Hub resources
        return False

    def delete_tag(self, key: str) -> bool:
        # tagging via resoto is not supported for Docker Hub resources
        return False
@define(eq=False, slots=False)
class DockerHubNamespace(DockerHubResource, BaseAccount):
    """A Docker Hub namespace, modeled as an account."""

    kind: ClassVar[str] = "dockerhub_namespace"
    # count as reported by the Docker Hub API (presumably the number
    # of repositories in the namespace — TODO confirm)
    count: Optional[int] = None
@define(eq=False, slots=False)
class DockerHubRepository(DockerHubResource, BaseResource):
    """A Docker Hub repository as returned by the Docker Hub API."""

    kind: ClassVar[str] = "dockerhub_repository"
    repository_type: Optional[str] = None
    is_private: Optional[bool] = None
    star_count: Optional[int] = None
    pull_count: Optional[int] = None
    affiliation: Optional[str] = None
    media_types: Optional[List[str]] = None

    @staticmethod
    def new(data: Dict) -> BaseResource:
        """Build a DockerHubRepository from a raw Docker Hub API response dict."""
        # The API may report media types as [None]; keep only real entries
        # and collapse an empty result to None.
        cleaned_media_types = [mt for mt in data.get("media_types", []) if mt is not None] or None
        return DockerHubRepository(
            id=data.get("name"),
            repository_type=data.get("repository_type"),
            is_private=data.get("is_private"),
            star_count=data.get("star_count"),
            pull_count=data.get("pull_count"),
            mtime=convert_date(data.get("last_updated")),
            ctime=convert_date(data.get("date_registered")),
            affiliation=data.get("affiliation"),
            media_types=cleaned_media_types,
        )
def convert_date(date_str: Optional[str]) -> Optional[datetime]:
    """Parse a Docker Hub API timestamp like "2021-01-02T03:04:05.678901Z".

    Returns None when the value is missing or malformed. Accepting None is
    required because callers pass `data.get(...)` results straight in, and
    `datetime.strptime(None, ...)` would raise a TypeError that the previous
    `except ValueError` did not catch.
    """
    if date_str is None:
        return None
    try:
        return datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S.%fZ")
    except ValueError:
        return None
import resotolib.logger
from attrs import define, field
from datetime import datetime
from typing import ClassVar, Dict, List, Optional
from resotolib.baseplugin import BaseCollectorPlugin
from resotolib.graph import ByNodeId, Graph, EdgeType, BySearchCriteria
from resotolib.args import ArgumentParser
from resotolib.config import Config
from resotolib.baseresources import (
BaseAccount,
BaseRegion,
BaseInstance,
BaseNetwork,
BaseResource,
BaseVolume,
InstanceStatus,
VolumeStatus,
)
log = resotolib.logger.getLogger("resoto." + __name__)
class ExampleCollectorPlugin(BaseCollectorPlugin):
    """Example collector plugin demonstrating the resoto collector API."""

    cloud = "example"

    def collect(self) -> None:
        """This method is being called by resoto whenever the collector runs
        It is responsible for querying the cloud APIs for remote resources and adding
        them to the plugin graph.
        The graph root (self.graph.root) must always be followed by one or more
        accounts. An account must always be followed by a region.
        A region can contain arbitrary resources.
        """
        log.debug("plugin: collecting example resources")

        account = ExampleAccount(id="Example Account")
        self.graph.add_resource(self.graph.root, account)

        region1 = ExampleRegion(id="us-west", name="US West", tags={"Some Tag": "Some Value"})
        self.graph.add_resource(account, region1)
        region2 = ExampleRegion(id="us-east", name="US East", tags={"Some Tag": "Some Value"})
        self.graph.add_resource(account, region2)

        network1 = ExampleNetwork(id="someNetwork1", tags={"Name": "Example Network 1"})
        network2 = ExampleNetwork(id="someNetwork2", tags={"Name": "Example Network 2"})
        self.graph.add_resource(region1, network1)
        self.graph.add_resource(region2, network2)

        # maps cloud API status strings to resoto's internal InstanceStatus
        instance_status_map: Dict[str, InstanceStatus] = {
            "pending": InstanceStatus.BUSY,
            "running": InstanceStatus.RUNNING,
            "shutting-down": InstanceStatus.BUSY,
            "terminated": InstanceStatus.TERMINATED,
            "stopping": InstanceStatus.BUSY,
            "stopped": InstanceStatus.STOPPED,
        }
        instance1 = ExampleInstance(
            id="someInstance1",
            tags={"Name": "Example Instance 1", "expiration": "2d", "owner": "lukas"},
            ctime=datetime.utcnow(),
            atime=datetime.utcnow(),
            mtime=datetime.utcnow(),
            instance_cores=4,
            instance_memory=32,
            instance_status=instance_status_map.get("running", InstanceStatus.UNKNOWN),
        )
        # an instance lives in a region and is attached to a network;
        # the delete edge models that the network depends on the instance being gone first
        self.graph.add_resource(region1, instance1)
        self.graph.add_resource(network1, instance1)
        self.graph.add_resource(network1, instance1, edge_type=EdgeType.delete)
        instance2 = ExampleInstance(
            id="someInstance2",
            tags={
                "Name": "Example Instance 2",
                "expiration": "36h",
                "resoto:ctime": "2019-09-05T10:40:11+00:00",
            },
            instance_status=instance_status_map.get("stopped", InstanceStatus.UNKNOWN),
        )
        self.graph.add_resource(region2, instance2)
        self.graph.add_resource(network2, instance2)
        self.graph.add_resource(network2, instance2, edge_type=EdgeType.delete)

        volume1 = ExampleVolume(id="someVolume1", tags={"Name": "Example Volume 1"}, volume_status=VolumeStatus.IN_USE)
        self.graph.add_resource(region1, volume1)
        self.graph.add_edge(instance1, volume1)
        self.graph.add_edge(volume1, instance1, edge_type=EdgeType.delete)
        volume2 = ExampleVolume(
            id="someVolume2", tags={"Name": "Example Volume 2"}, volume_status=VolumeStatus.AVAILABLE
        )
        self.graph.add_resource(region2, volume2)
        self.graph.add_edge(instance2, volume2)
        self.graph.add_edge(volume2, instance2, edge_type=EdgeType.delete)

        # a deferred edge is resolved by resotocore after the graph is merged,
        # linking a known node id to whatever matches the search criteria
        self.graph.add_deferred_edge(
            ByNodeId(instance1.chksum),
            BySearchCriteria(f"is(instance) and reported.id = {instance2.id}"),
            EdgeType.default,
        )

        custom_resource = ExampleCustomResource(
            id="someExampleResource",
            custom_optional_float_attribute=10.0,
            custom_list_attribute=["foo", "bar"],
        )
        self.graph.add_resource(region1, custom_resource)

    @staticmethod
    def add_args(arg_parser: ArgumentParser) -> None:
        """Example of how to use the ArgumentParser
        Can be accessed via ArgumentParser.args.example_arg
        Note: almost all plugin config should be done via add_config()
        so it can be changed centrally and at runtime.
        """
        # arg_parser.add_argument(
        #     "--example-arg",
        #     help="Example Argument",
        #     dest="example_arg",
        #     type=str,
        #     default=None,
        #     nargs="+",
        # )
        pass

    @staticmethod
    def add_config(config: Config) -> None:
        """Add any plugin config to the global config store.
        Method called by the PluginLoader upon plugin initialization.
        Can be used to introduce plugin config arguments to the global config store.
        """
        # config.add_config(ExampleConfig)
        pass
@define
class ExampleConfig:
    """Example of how to use the resotocore config service
    Can be accessed via Config.example.region
    """

    kind: ClassVar[str] = "example"
    region: Optional[List[str]] = field(default=None, metadata={"description": "Example Region"})
@define(eq=False, slots=False)
class ExampleAccount(BaseAccount):
    """Some example account"""

    kind: ClassVar[str] = "example_account"

    def delete(self, graph: Graph) -> bool:
        # accounts cannot be deleted, hence NotImplemented
        return NotImplemented
@define(eq=False, slots=False)
class ExampleRegion(BaseRegion):
    """Some example region"""

    kind: ClassVar[str] = "example_region"

    def delete(self, graph: Graph) -> bool:
        """Regions can usually not be deleted so we return NotImplemented"""
        return NotImplemented
@define(eq=False, slots=False)
class ExampleResource:
    """A class that implements the abstract method delete() as well as update_tag()
    and delete_tag().
    delete() must be implemented. update_tag() and delete_tag() are optional.
    """

    kind: ClassVar[str] = "example_resource"

    def delete(self, graph: Graph) -> bool:
        """Delete a resource in the cloud"""
        log.debug(f"Deleting resource {self.id} in account {self.account(graph).id} region {self.region(graph).id}")
        return True

    def update_tag(self, key: str, value: str) -> bool:
        """Update a resource tag in the cloud"""
        log.debug(f"Updating or setting tag {key}: {value} on resource {self.id}")
        return True

    def delete_tag(self, key: str) -> bool:
        """Delete a resource tag in the cloud"""
        log.debug(f"Deleting tag {key} on resource {self.id}")
        return True
@define(eq=False, slots=False)
class ExampleInstance(ExampleResource, BaseInstance):
    """An Example Instance Resource"""

    kind: ClassVar[str] = "example_instance"
@define(eq=False, slots=False)
class ExampleVolume(ExampleResource, BaseVolume):
    """An example volume resource."""

    kind: ClassVar[str] = "example_volume"
@define(eq=False, slots=False)
class ExampleNetwork(ExampleResource, BaseNetwork):
    """Some example network
    This is what instances and other networking related resources might reside in.
    """

    kind: ClassVar[str] = "example_network"
@define(eq=False, slots=False)
class ExampleCustomResource(ExampleResource, BaseResource):
    """An example custom resource that only inherits the collectors
    ExampleResource class as well as the BaseResource base class.
    This is mainly an example of how to use typed Python dataclasses
    from which the resoto data model is being generated.
    """

    kind: ClassVar[str] = "example_custom_resource"
    custom_string_attribute: str = ""
    custom_int_attribute: int = 0
    custom_optional_float_attribute: Optional[float] = None
    # mutable defaults must use a factory so instances do not share state
    custom_dict_attribute: Dict[str, str] = field(factory=dict)
    custom_list_attribute: List[str] = field(factory=list)
import multiprocessing
from concurrent import futures
from typing import Optional, Dict, Any
import resotolib.proc
from resotolib.args import ArgumentParser
from resotolib.args import Namespace
from resotolib.baseplugin import BaseCollectorPlugin
from resotolib.baseresources import Cloud
from resotolib.config import Config, RunningConfig
from resotolib.core.actions import CoreFeedback
from resotolib.graph import Graph
from resotolib.logger import log, setup_logger
from .collector import GcpProjectCollector
from .config import GcpConfig
from .resources.base import GcpProject
from .utils import Credentials
class GCPCollectorPlugin(BaseCollectorPlugin):
    """Google Cloud Platform resoto collector plugin.
    Gets instantiated in resoto's Processor thread. The collect() method
    is run during a resource collection loop.
    """

    cloud = "gcp"

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # set by the outer collector plugin before collect() is invoked
        self.core_feedback: Optional[CoreFeedback] = None

    def collect(self) -> None:
        """Run by resoto during the global collect() run.
        This method kicks off code that adds GCP resources to `self.graph`.
        When collect() finishes the parent thread will take `self.graph` and merge
        it with the global production graph.
        """
        log.debug("plugin: GCP collecting resources")
        assert self.core_feedback, "core_feedback is not set"  # will be set by the outer collector plugin
        feedback = self.core_feedback.with_context("gcp")
        cloud = Cloud(id=self.cloud, name="Gcp")

        credentials = Credentials.all(feedback)
        # if an explicit project list is configured, collect only those projects
        if len(Config.gcp.project) > 0:
            for project in list(credentials.keys()):
                if project not in Config.gcp.project:
                    del credentials[project]

        if len(credentials) == 0:
            return

        # never start more workers than there are projects
        max_workers = (
            len(credentials) if len(credentials) < Config.gcp.project_pool_size else Config.gcp.project_pool_size
        )
        collect_args = {}
        pool_args = {"max_workers": max_workers}
        pool_executor = futures.ThreadPoolExecutor
        if Config.gcp.fork_process:
            # spawned processes do not inherit memory/file descriptors, so pass
            # parsed args, running config and (serializable) credentials along
            collect_args = {
                "args": ArgumentParser.args,
                "running_config": Config.running_config,
                "credentials": credentials if all(v is None for v in credentials.values()) else None,
            }
            collect_method = collect_in_process
        else:
            collect_method = self.collect_project

        with pool_executor(**pool_args) as executor:
            # noinspection PyTypeChecker
            wait_for = [
                executor.submit(collect_method, project_id, feedback, cloud, **collect_args)
                for project_id in credentials.keys()
            ]
            for future in futures.as_completed(wait_for):
                project_graph = future.result()
                if not isinstance(project_graph, Graph):
                    log.error(f"Skipping invalid project_graph {type(project_graph)}")
                    continue
                self.send_account_graph(project_graph)
                # release the per-project graph before processing the next one
                del project_graph

    @staticmethod
    def collect_project(
        project_id: str,
        core_feedback: CoreFeedback,
        cloud: Cloud,
        args: Optional[Namespace] = None,
        running_config: Optional[RunningConfig] = None,
        credentials: Optional[Dict[str, Any]] = None,
    ) -> Optional[Graph]:
        """Collects an individual project.
        Is being called in collect() and either run within a thread or a spawned
        process. Depending on whether `gcp.fork_process` was specified or not.
        Because the spawned process does not inherit any of our memory or file
        descriptors we are passing the already parsed `args` Namespace() to this
        method.

        Returns the project's graph, or None when collection failed.
        """
        project = GcpProject(id=project_id, name=project_id)
        collector_name = f"gcp_{project_id}"
        resotolib.proc.set_thread_name(collector_name)

        # re-establish global state inside a freshly spawned process
        if args is not None:
            ArgumentParser.args = args
            setup_logger("resotoworker-gcp", force=True, level=getattr(args, "log_level", None))
        if running_config is not None:
            Config.running_config.apply(running_config)

        if credentials is not None:
            Credentials._credentials = credentials
            Credentials._initialized = True

        log.debug(f"Starting new collect process for project {project.dname}")

        try:
            core_feedback.progress_done(project_id, 0, 1)
            gpc = GcpProjectCollector(Config.gcp, cloud, project, core_feedback)
            gpc.collect()
            core_feedback.progress_done(project_id, 1, 1)
        except Exception as ex:
            # a failing project must not abort collection of the remaining projects
            core_feedback.with_context("gcp", project_id).error(f"Failed to collect project: {ex}", log)
            return None
        else:
            return gpc.graph

    @staticmethod
    def add_config(config: Config) -> None:
        """Called by resoto upon startup to populate the Config store"""
        config.add_config(GcpConfig)
def collect_project_proxy(*args, queue: multiprocessing.Queue, **kwargs) -> None: # type: ignore
    """Entry point of the spawned child process: collect a single project and
    hand the result (a Graph, or None on failure) back to the parent via *queue*."""
    resotolib.proc.collector_initializer()
    queue.put(GCPCollectorPlugin.collect_project(*args, **kwargs))
def collect_in_process(*args, **kwargs) -> Optional[Graph]: # type: ignore
    """Run GCPCollectorPlugin.collect_project in a freshly spawned child process.
    The "spawn" start method is used deliberately so the child inherits neither
    memory nor file descriptors from the parent. The result travels back through
    a Queue; it is read *before* join() — a child process blocks on exit until
    its queued data has been consumed, so joining first could deadlock.
    """
    ctx = multiprocessing.get_context("spawn")
    queue = ctx.Queue()
    kwargs["queue"] = queue
    process = ctx.Process(target=collect_project_proxy, args=args, kwargs=kwargs)
    process.start()
    graph = queue.get()
    process.join()
    return graph # type: ignore
from datetime import datetime
from typing import ClassVar, Dict, Optional, List, Type, cast
from attr import define, field
from resoto_plugin_gcp.gcp_client import GcpApiSpec
from resoto_plugin_gcp.resources.base import GcpResource, GcpDeprecationStatus, GraphBuilder
from resotolib.baseresources import ModelReference
from resotolib.json_bender import Bender, S, Bend, ForallBend
from resotolib.types import Json
# This service is called Cloud Billing in the documentation
# https://cloud.google.com/billing/docs
# API https://googleapis.github.io/google-api-python-client/docs/dyn/cloudbilling_v1.html
@define(eq=False, slots=False)
class GcpBillingAccount(GcpResource):
    """A Cloud Billing account, collected via `billingAccounts.list` of the
    cloudbilling v1 API."""
    kind: ClassVar[str] = "gcp_billing_account"
    # A billing account points to the project billing infos it pays for.
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {"default": ["gcp_project_billing_info"]},
    }
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="cloudbilling",
        version="v1",
        accessors=["billingAccounts"],
        action="list",
        request_parameter={},
        request_parameter_in=set(),
        response_path="billingAccounts",
        response_regional_sub_path=None,
        required_iam_permissions=[], # does not require any permissions
        mutate_iam_permissions=[], # can not be deleted
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "tags": S("labels", default={}),
        "name": S("name"),
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "display_name": S("displayName"),
        "master_billing_account": S("masterBillingAccount"),
        "open": S("open"),
    }
    display_name: Optional[str] = field(default=None)
    master_billing_account: Optional[str] = field(default=None)
    open: Optional[bool] = field(default=None)
    def post_process(self, graph_builder: GraphBuilder, source: Json) -> None:
        # Eagerly collect the billing infos of all projects attached to this
        # account and connect each one as a successor.
        for info in GcpProjectBillingInfo.collect_resources(graph_builder, name=self.name):
            graph_builder.add_edge(self, node=info)
    @classmethod
    def called_collect_apis(cls) -> List[GcpApiSpec]:
        # Also advertise the nested GcpProjectBillingInfo call issued in post_process.
        return [cls.api_spec, GcpProjectBillingInfo.api_spec]
@define(eq=False, slots=False)
class GcpProjectBillingInfo(GcpResource):
    """Billing info of a single project, collected per billing account via
    `billingAccounts.projects.list` (triggered from GcpBillingAccount.post_process)."""
    kind: ClassVar[str] = "gcp_project_billing_info"
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="cloudbilling",
        version="v1",
        accessors=["billingAccounts", "projects"],
        action="list",
        request_parameter={"name": "{name}"},
        request_parameter_in={"name"},
        response_path="projectBillingInfo",
        response_regional_sub_path=None,
        # valid permission name according to documentation, but gcloud emits an error
        # required_iam_permissions=["billing.resourceAssociations.list"],
        required_iam_permissions=[],
        mutate_iam_permissions=["billing.resourceAssociations.delete"],
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "tags": S("labels", default={}),
        "name": S("name"),
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "billing_account_name": S("billingAccountName"),
        "billing_enabled": S("billingEnabled"),
        "project_billing_info_project_id": S("projectId"),
    }
    billing_account_name: Optional[str] = field(default=None)
    billing_enabled: Optional[bool] = field(default=None)
    project_billing_info_project_id: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpService(GcpResource):
    """A public GCP service as listed by `services.list` of the cloudbilling v1 API."""
    kind: ClassVar[str] = "gcp_service"
    # A service points to the SKUs it offers.
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {"default": ["gcp_sku"]},
    }
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="cloudbilling",
        version="v1",
        accessors=["services"],
        action="list",
        request_parameter={},
        request_parameter_in=set(),
        response_path="services",
        response_regional_sub_path=None,
        required_iam_permissions=[], # does not require any permissions
        mutate_iam_permissions=[], # can not be deleted
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("serviceId"),
        "tags": S("labels", default={}),
        "name": S("name"),
        "display_name": S("displayName"),
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "business_entity_name": S("businessEntityName"),
    }
    business_entity_name: Optional[str] = field(default=None)
    display_name: Optional[str] = field(default=None)
    @classmethod
    def collect(cls: Type[GcpResource], raw: List[Json], builder: GraphBuilder) -> List[GcpResource]:
        # Additional behavior: iterate over list of collected GcpService and for each:
        # - collect related GcpSku
        result: List[GcpResource] = super().collect(raw, builder) # type: ignore
        # Only SKUs of these services are collected; everything else is skipped
        # to keep the amount of collected SKU data manageable.
        SERVICES_COLLECT_LIST = [
            "Compute Engine",
        ]
        service_names = [
            service.name for service in cast(List[GcpService], result) if service.display_name in SERVICES_COLLECT_LIST
        ]
        for service_name in service_names:
            builder.submit_work(GcpSku.collect_resources, builder, parent=service_name)
        return result
    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        # Connect this service to every collected SKU whose name starts with the
        # service id. NOTE(review): assumes SKU names are prefixed with the
        # service id — confirm against actual API responses.
        def filter(node: GcpResource) -> bool:
            return isinstance(node, GcpSku) and node.name is not None and node.name.startswith(self.id)
        builder.add_edges(self, filter=filter)
    @classmethod
    def called_collect_apis(cls) -> List[GcpApiSpec]:
        # Also advertise the nested GcpSku call issued in collect().
        return [cls.api_spec, GcpSku.api_spec]
@define(eq=False, slots=False)
class GcpCategory:
    """Taxonomy of a SKU: resource family, group, owning service and usage type."""
    kind: ClassVar[str] = "gcp_category"
    mapping: ClassVar[Dict[str, Bender]] = {
        "resource_family": S("resourceFamily"),
        "resource_group": S("resourceGroup"),
        "service_display_name": S("serviceDisplayName"),
        "usage_type": S("usageType"),
    }
    resource_family: Optional[str] = field(default=None)
    resource_group: Optional[str] = field(default=None)
    service_display_name: Optional[str] = field(default=None)
    usage_type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpGeoTaxonomy:
    """Geographic metadata of a SKU: taxonomy type and applicable regions."""
    kind: ClassVar[str] = "gcp_geo_taxonomy"
    mapping: ClassVar[Dict[str, Bender]] = {"regions": S("regions", default=[]), "type": S("type")}
    regions: List[str] = field(factory=list)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpAggregationInfo:
    """How usage is aggregated for pricing purposes (count, interval, level)."""
    kind: ClassVar[str] = "gcp_aggregation_info"
    mapping: ClassVar[Dict[str, Bender]] = {
        "aggregation_count": S("aggregationCount"),
        "aggregation_interval": S("aggregationInterval"),
        "aggregation_level": S("aggregationLevel"),
    }
    aggregation_count: Optional[int] = field(default=None)
    aggregation_interval: Optional[str] = field(default=None)
    aggregation_level: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpMoney:
    """An amount of money: currency code plus whole units and fractional nanos (1e-9 units)."""
    kind: ClassVar[str] = "gcp_money"
    mapping: ClassVar[Dict[str, Bender]] = {
        "currency_code": S("currencyCode"),
        "nanos": S("nanos"),
        "units": S("units"),
    }
    currency_code: Optional[str] = field(default=None)
    nanos: Optional[int] = field(default=None)
    units: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpTierRate:
    """Unit price that applies starting at a given usage amount."""
    kind: ClassVar[str] = "gcp_tier_rate"
    mapping: ClassVar[Dict[str, Bender]] = {
        "start_usage_amount": S("startUsageAmount"),
        "unit_price": S("unitPrice", default={}) >> Bend(GcpMoney.mapping),
    }
    start_usage_amount: Optional[float] = field(default=None)
    unit_price: Optional[GcpMoney] = field(default=None)
@define(eq=False, slots=False)
class GcpPricingExpression:
    """Unit pricing details of a SKU, including its tiered rates."""
    kind: ClassVar[str] = "gcp_pricing_expression"
    mapping: ClassVar[Dict[str, Bender]] = {
        "base_unit": S("baseUnit"),
        "base_unit_conversion_factor": S("baseUnitConversionFactor"),
        "base_unit_description": S("baseUnitDescription"),
        "display_quantity": S("displayQuantity"),
        "tiered_rates": S("tieredRates", default=[]) >> ForallBend(GcpTierRate.mapping),
        "usage_unit": S("usageUnit"),
        "usage_unit_description": S("usageUnitDescription"),
    }
    base_unit: Optional[str] = field(default=None)
    base_unit_conversion_factor: Optional[float] = field(default=None)
    base_unit_description: Optional[str] = field(default=None)
    display_quantity: Optional[float] = field(default=None)
    tiered_rates: List[GcpTierRate] = field(factory=list)
    usage_unit: Optional[str] = field(default=None)
    usage_unit_description: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpPricingInfo:
    """Pricing of a SKU at a point in time (effective_time)."""
    kind: ClassVar[str] = "gcp_pricing_info"
    mapping: ClassVar[Dict[str, Bender]] = {
        "aggregation_info": S("aggregationInfo", default={}) >> Bend(GcpAggregationInfo.mapping),
        "currency_conversion_rate": S("currencyConversionRate"),
        "effective_time": S("effectiveTime"),
        "pricing_expression": S("pricingExpression", default={}) >> Bend(GcpPricingExpression.mapping),
        "summary": S("summary"),
    }
    aggregation_info: Optional[GcpAggregationInfo] = field(default=None)
    currency_conversion_rate: Optional[float] = field(default=None)
    effective_time: Optional[datetime] = field(default=None)
    pricing_expression: Optional[GcpPricingExpression] = field(default=None)
    summary: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSku(GcpResource):
    """A billable SKU of a service, collected per service via `services.skus.list`
    (triggered from GcpService.collect for allow-listed services)."""
    kind: ClassVar[str] = "gcp_sku"
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="cloudbilling",
        version="v1",
        accessors=["services", "skus"],
        action="list",
        request_parameter={"parent": "{parent}"},
        request_parameter_in={"parent"},
        response_path="skus",
        response_regional_sub_path=None,
        required_iam_permissions=[], # does not require any permissions
        mutate_iam_permissions=[], # can not be deleted
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("skuId"),
        "tags": S("labels", default={}),
        "name": S("name"),
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "category": S("category", default={}) >> Bend(GcpCategory.mapping),
        "geo_taxonomy": S("geoTaxonomy", default={}) >> Bend(GcpGeoTaxonomy.mapping),
        "sku_pricing_info": S("pricingInfo", default=[]) >> ForallBend(GcpPricingInfo.mapping),
        "service_provider_name": S("serviceProviderName"),
        "service_regions": S("serviceRegions", default=[]),
        "sku_id": S("skuId"),
    }
    category: Optional[GcpCategory] = field(default=None)
    geo_taxonomy: Optional[GcpGeoTaxonomy] = field(default=None)
    sku_pricing_info: List[GcpPricingInfo] = field(factory=list)
    service_provider_name: Optional[str] = field(default=None)
    service_regions: List[str] = field(factory=list)
    usage_unit_nanos: Optional[int] = field(default=None)
    def post_process(self, graph_builder: GraphBuilder, source: Json) -> None:
        # Derive a single representative price (the unit price's nanos) from the
        # first pricing info entry and store it in usage_unit_nanos.
        if len(self.sku_pricing_info) > 0:
            if not (pricing_expression := self.sku_pricing_info[0].pricing_expression):
                return
            tiered_rates = pricing_expression.tiered_rates
            cost = -1  # sentinel: no usable rate found
            if len(tiered_rates) == 1:
                # NOTE(review): the truthiness check skips rates with nanos == 0,
                # so the `units` part of the price is not considered here.
                if tiered_rates[0].unit_price and tiered_rates[0].unit_price.nanos:
                    cost = tiered_rates[0].unit_price.nanos
            else:
                # Multiple tiers: take the first paid tier (start_usage_amount > 0).
                for tiered_rate in tiered_rates:
                    if sua := tiered_rate.start_usage_amount:
                        if sua > 0:
                            if tiered_rate.unit_price and tiered_rate.unit_price.nanos:
                                cost = tiered_rate.unit_price.nanos
                                break
            if cost > -1:
                self.usage_unit_nanos = cost
# Resource kinds of this module that are collected top-level; dependent kinds
# (GcpProjectBillingInfo, GcpSku) are collected from within these.
resources = [GcpBillingAccount, GcpService]
import logging
from datetime import datetime
from typing import ClassVar, Dict, Optional, List, Type
from attr import define, field
from resoto_plugin_gcp.gcp_client import GcpApiSpec
from resoto_plugin_gcp.resources.base import GcpResource, GcpDeprecationStatus, GraphBuilder
from resoto_plugin_gcp.resources.compute import GcpSslCertificate
from resotolib.baseresources import ModelReference
from resotolib.json_bender import Bender, S, Bend, ForallBend, K
from resotolib.types import Json
# Module-level logger shared by all resources in this module.
log = logging.getLogger("resoto.plugins.gcp")
@define(eq=False, slots=False)
class GcpSqlOperationError:
    """Error code and message of a failed Cloud SQL operation."""
    kind: ClassVar[str] = "gcp_sql_operation_error"
    mapping: ClassVar[Dict[str, Bender]] = {"code": S("code"), "message": S("message")}
    code: Optional[str] = field(default=None)
    message: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlBackupRun(GcpResource):
    """A single backup run of a Cloud SQL instance (`backupRuns.list` of the
    sqladmin v1 API)."""
    # collected via GcpSqlDatabaseInstance
    kind: ClassVar[str] = "gcp_sql_backup_run"
    reference_kinds: ClassVar[ModelReference] = {"predecessors": {"default": ["gcp_database_instance"]}}
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="sqladmin",
        version="v1",
        accessors=["backupRuns"],
        action="list",
        request_parameter={"instance": "{instance}", "project": "{project}"},
        request_parameter_in={"instance", "project"},
        response_path="items",
        response_regional_sub_path=None,
        required_iam_permissions=["cloudsql.backupRuns.list"],
        mutate_iam_permissions=["cloudsql.backupRuns.delete"],
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "tags": S("labels", default={}),
        "name": S("name"),
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "backup_kind": S("backupKind"),
        "disk_encryption_configuration": S("diskEncryptionConfiguration", "kmsKeyName"),
        "disk_encryption_status": S("diskEncryptionStatus", "kmsKeyVersionName"),
        "end_time": S("endTime"),
        "enqueued_time": S("enqueuedTime"),
        "sql_operation_error": S("error", default={}) >> Bend(GcpSqlOperationError.mapping),
        "instance": S("instance"),
        "location": S("location"),
        "start_time": S("startTime"),
        "status": S("status"),
        "time_zone": S("timeZone"),
        "type": S("type"),
        "window_start_time": S("windowStartTime"),
    }
    backup_kind: Optional[str] = field(default=None)
    disk_encryption_configuration: Optional[str] = field(default=None)
    disk_encryption_status: Optional[str] = field(default=None)
    end_time: Optional[datetime] = field(default=None)
    enqueued_time: Optional[datetime] = field(default=None)
    sql_operation_error: Optional[GcpSqlOperationError] = field(default=None)
    instance: Optional[str] = field(default=None)
    location: Optional[str] = field(default=None)
    start_time: Optional[datetime] = field(default=None)
    status: Optional[str] = field(default=None)
    time_zone: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
    window_start_time: Optional[datetime] = field(default=None)
    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        # reverse=True: draw the edge from the owning database instance to this
        # backup run, so the instance becomes the predecessor.
        if self.instance:
            builder.add_edge(self, reverse=True, clazz=GcpSqlDatabaseInstance, name=self.instance)
@define(eq=False, slots=False)
class GcpSqlSqlServerDatabaseDetails:
    """SQL Server specific details of a database (compatibility level, recovery model)."""
    kind: ClassVar[str] = "gcp_sql_sql_server_database_details"
    mapping: ClassVar[Dict[str, Bender]] = {
        "compatibility_level": S("compatibilityLevel"),
        "recovery_model": S("recoveryModel"),
    }
    compatibility_level: Optional[int] = field(default=None)
    recovery_model: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlDatabase(GcpResource):
    """A database inside a Cloud SQL instance (`databases.list` of the sqladmin v1 API)."""
    # collected via GcpSqlDatabaseInstance
    kind: ClassVar[str] = "gcp_sql_database"
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="sqladmin",
        version="v1",
        accessors=["databases"],
        action="list",
        request_parameter={"instance": "{instance}", "project": "{project}"},
        request_parameter_in={"instance", "project"},
        response_path="items",
        response_regional_sub_path=None,
        required_iam_permissions=["cloudsql.databases.list"],
        mutate_iam_permissions=["cloudsql.databases.update", "cloudsql.databases.delete"],
    )
    reference_kinds: ClassVar[ModelReference] = {"predecessors": {"default": ["gcp_sql_database_instance"]}}
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "tags": S("labels", default={}),
        "name": S("name"),
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "charset": S("charset"),
        "collation": S("collation"),
        "etag": S("etag"),
        "instance": S("instance"),
        "project": S("project"),
        "sqlserver_database_details": S("sqlserverDatabaseDetails", default={})
        >> Bend(GcpSqlSqlServerDatabaseDetails.mapping),
    }
    charset: Optional[str] = field(default=None)
    collation: Optional[str] = field(default=None)
    etag: Optional[str] = field(default=None)
    instance: Optional[str] = field(default=None)
    project: Optional[str] = field(default=None)
    sqlserver_database_details: Optional[GcpSqlSqlServerDatabaseDetails] = field(default=None)
    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        # reverse=True: edge runs from the owning database instance to this database.
        if self.instance:
            builder.add_edge(self, reverse=True, clazz=GcpSqlDatabaseInstance, name=self.instance)
@define(eq=False, slots=False)
class GcpSqlFailoverreplica:
    """Failover replica info of a Cloud SQL instance."""
    kind: ClassVar[str] = "gcp_sql_failoverreplica"
    mapping: ClassVar[Dict[str, Bender]] = {"available": S("available"), "name": S("name")}
    available: Optional[bool] = field(default=None)
    name: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlIpMapping:
    """An IP address assigned to a Cloud SQL instance."""
    kind: ClassVar[str] = "gcp_sql_ip_mapping"
    mapping: ClassVar[Dict[str, Bender]] = {
        "ip_address": S("ipAddress"),
        "time_to_retire": S("timeToRetire"),
        "type": S("type"),
    }
    ip_address: Optional[str] = field(default=None)
    time_to_retire: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlInstanceReference:
    """Reference to another Cloud SQL instance by name, project and region."""
    kind: ClassVar[str] = "gcp_sql_instance_reference"
    mapping: ClassVar[Dict[str, Bender]] = {"name": S("name"), "project": S("project"), "region": S("region")}
    name: Optional[str] = field(default=None)
    project: Optional[str] = field(default=None)
    region: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlOnPremisesConfiguration:
    """On-premises source configuration of an external replication setup.
    NOTE: carries sensitive material (password, client key) as returned by the API."""
    kind: ClassVar[str] = "gcp_sql_on_premises_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "ca_certificate": S("caCertificate"),
        "client_certificate": S("clientCertificate"),
        "client_key": S("clientKey"),
        "dump_file_path": S("dumpFilePath"),
        "host_port": S("hostPort"),
        "password": S("password"),
        "source_instance": S("sourceInstance", default={}) >> Bend(GcpSqlInstanceReference.mapping),
        "username": S("username"),
    }
    ca_certificate: Optional[str] = field(default=None)
    client_certificate: Optional[str] = field(default=None)
    client_key: Optional[str] = field(default=None)
    dump_file_path: Optional[str] = field(default=None)
    host_port: Optional[str] = field(default=None)
    password: Optional[str] = field(default=None)
    source_instance: Optional[GcpSqlInstanceReference] = field(default=None)
    username: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlSqlOutOfDiskReport:
    """Out-of-disk report of a Cloud SQL instance (state plus recommended increase)."""
    kind: ClassVar[str] = "gcp_sql_sql_out_of_disk_report"
    mapping: ClassVar[Dict[str, Bender]] = {
        "sql_min_recommended_increase_size_gb": S("sqlMinRecommendedIncreaseSizeGb"),
        "sql_out_of_disk_state": S("sqlOutOfDiskState"),
    }
    sql_min_recommended_increase_size_gb: Optional[int] = field(default=None)
    sql_out_of_disk_state: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlMySqlReplicaConfiguration:
    """MySQL-specific replica configuration.
    NOTE: carries sensitive material (password, client key) as returned by the API."""
    kind: ClassVar[str] = "gcp_sql_my_sql_replica_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "ca_certificate": S("caCertificate"),
        "client_certificate": S("clientCertificate"),
        "client_key": S("clientKey"),
        "connect_retry_interval": S("connectRetryInterval"),
        "dump_file_path": S("dumpFilePath"),
        "master_heartbeat_period": S("masterHeartbeatPeriod"),
        "password": S("password"),
        "ssl_cipher": S("sslCipher"),
        "username": S("username"),
        "verify_server_certificate": S("verifyServerCertificate"),
    }
    ca_certificate: Optional[str] = field(default=None)
    client_certificate: Optional[str] = field(default=None)
    client_key: Optional[str] = field(default=None)
    connect_retry_interval: Optional[int] = field(default=None)
    dump_file_path: Optional[str] = field(default=None)
    master_heartbeat_period: Optional[str] = field(default=None)
    password: Optional[str] = field(default=None)
    ssl_cipher: Optional[str] = field(default=None)
    username: Optional[str] = field(default=None)
    verify_server_certificate: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlReplicaConfiguration:
    """Replica configuration of a Cloud SQL instance."""
    kind: ClassVar[str] = "gcp_sql_replica_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "failover_target": S("failoverTarget"),
        "mysql_replica_configuration": S("mysqlReplicaConfiguration", default={})
        >> Bend(GcpSqlMySqlReplicaConfiguration.mapping),
    }
    failover_target: Optional[bool] = field(default=None)
    mysql_replica_configuration: Optional[GcpSqlMySqlReplicaConfiguration] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlSqlScheduledMaintenance:
    """Details of upcoming scheduled maintenance of a Cloud SQL instance."""
    kind: ClassVar[str] = "gcp_sql_sql_scheduled_maintenance"
    mapping: ClassVar[Dict[str, Bender]] = {
        "can_defer": S("canDefer"),
        "can_reschedule": S("canReschedule"),
        "schedule_deadline_time": S("scheduleDeadlineTime"),
        "start_time": S("startTime"),
    }
    can_defer: Optional[bool] = field(default=None)
    can_reschedule: Optional[bool] = field(default=None)
    schedule_deadline_time: Optional[datetime] = field(default=None)
    start_time: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlSslCert:
    """An SSL certificate associated with a Cloud SQL instance."""
    kind: ClassVar[str] = "gcp_sql_ssl_cert"
    mapping: ClassVar[Dict[str, Bender]] = {
        "cert": S("cert"),
        "cert_serial_number": S("certSerialNumber"),
        "common_name": S("commonName"),
        "create_time": S("createTime"),
        "expiration_time": S("expirationTime"),
        "instance": S("instance"),
        "self_link": S("selfLink"),
        "sha1_fingerprint": S("sha1Fingerprint"),
    }
    cert: Optional[str] = field(default=None)
    cert_serial_number: Optional[str] = field(default=None)
    common_name: Optional[str] = field(default=None)
    create_time: Optional[datetime] = field(default=None)
    expiration_time: Optional[datetime] = field(default=None)
    instance: Optional[str] = field(default=None)
    self_link: Optional[str] = field(default=None)
    sha1_fingerprint: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlBackupRetentionSettings:
    """How many backups are retained and in which unit."""
    kind: ClassVar[str] = "gcp_sql_backup_retention_settings"
    mapping: ClassVar[Dict[str, Bender]] = {
        "retained_backups": S("retainedBackups"),
        "retention_unit": S("retentionUnit"),
    }
    retained_backups: Optional[int] = field(default=None)
    retention_unit: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlBackupConfiguration:
    """Backup configuration of a Cloud SQL instance."""
    kind: ClassVar[str] = "gcp_sql_backup_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "backup_retention_settings": S("backupRetentionSettings", default={})
        >> Bend(GcpSqlBackupRetentionSettings.mapping),
        "binary_log_enabled": S("binaryLogEnabled"),
        "enabled": S("enabled"),
        "location": S("location"),
        "point_in_time_recovery_enabled": S("pointInTimeRecoveryEnabled"),
        "replication_log_archiving_enabled": S("replicationLogArchivingEnabled"),
        "start_time": S("startTime"),
        "transaction_log_retention_days": S("transactionLogRetentionDays"),
    }
    backup_retention_settings: Optional[GcpSqlBackupRetentionSettings] = field(default=None)
    binary_log_enabled: Optional[bool] = field(default=None)
    enabled: Optional[bool] = field(default=None)
    location: Optional[str] = field(default=None)
    point_in_time_recovery_enabled: Optional[bool] = field(default=None)
    replication_log_archiving_enabled: Optional[bool] = field(default=None)
    start_time: Optional[str] = field(default=None)
    transaction_log_retention_days: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlDatabaseFlags:
    """A single database flag as a name/value pair."""
    kind: ClassVar[str] = "gcp_sql_database_flags"
    mapping: ClassVar[Dict[str, Bender]] = {"name": S("name"), "value": S("value")}
    name: Optional[str] = field(default=None)
    value: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlDenyMaintenancePeriod:
    """A period during which maintenance on the instance is denied."""
    kind: ClassVar[str] = "gcp_sql_deny_maintenance_period"
    mapping: ClassVar[Dict[str, Bender]] = {"end_date": S("endDate"), "start_date": S("startDate"), "time": S("time")}
    end_date: Optional[str] = field(default=None)
    start_date: Optional[str] = field(default=None)
    time: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlInsightsConfig:
    """Query Insights configuration of a Cloud SQL instance."""
    kind: ClassVar[str] = "gcp_sql_insights_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "query_insights_enabled": S("queryInsightsEnabled"),
        "query_plans_per_minute": S("queryPlansPerMinute"),
        "query_string_length": S("queryStringLength"),
        "record_application_tags": S("recordApplicationTags"),
        "record_client_address": S("recordClientAddress"),
    }
    query_insights_enabled: Optional[bool] = field(default=None)
    query_plans_per_minute: Optional[int] = field(default=None)
    query_string_length: Optional[int] = field(default=None)
    record_application_tags: Optional[bool] = field(default=None)
    record_client_address: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlAclEntry:
    """An authorized-network ACL entry (value plus optional expiration)."""
    kind: ClassVar[str] = "gcp_sql_acl_entry"
    mapping: ClassVar[Dict[str, Bender]] = {
        "expiration_time": S("expirationTime"),
        "name": S("name"),
        "value": S("value"),
    }
    expiration_time: Optional[datetime] = field(default=None)
    name: Optional[str] = field(default=None)
    value: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlIpConfiguration:
    """IP/network configuration of a Cloud SQL instance."""
    kind: ClassVar[str] = "gcp_sql_ip_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "allocated_ip_range": S("allocatedIpRange"),
        "authorized_networks": S("authorizedNetworks", default=[]) >> ForallBend(GcpSqlAclEntry.mapping),
        "ipv4_enabled": S("ipv4Enabled"),
        "private_network": S("privateNetwork"),
        "require_ssl": S("requireSsl"),
    }
    allocated_ip_range: Optional[str] = field(default=None)
    authorized_networks: Optional[List[GcpSqlAclEntry]] = field(default=None)
    ipv4_enabled: Optional[bool] = field(default=None)
    private_network: Optional[str] = field(default=None)
    require_ssl: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlLocationPreference:
    """Preferred location (zone / secondary zone) of a Cloud SQL instance."""
    kind: ClassVar[str] = "gcp_sql_location_preference"
    mapping: ClassVar[Dict[str, Bender]] = {
        "follow_gae_application": S("followGaeApplication"),
        "secondary_zone": S("secondaryZone"),
        "zone": S("zone"),
    }
    follow_gae_application: Optional[str] = field(default=None)
    secondary_zone: Optional[str] = field(default=None)
    zone: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlMaintenanceWindow:
    """Maintenance window of a Cloud SQL instance (day, hour, update track)."""
    kind: ClassVar[str] = "gcp_sql_maintenance_window"
    mapping: ClassVar[Dict[str, Bender]] = {"day": S("day"), "hour": S("hour"), "update_track": S("updateTrack")}
    day: Optional[int] = field(default=None)
    hour: Optional[int] = field(default=None)
    update_track: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlPasswordValidationPolicy:
    """Password validation policy of a Cloud SQL instance."""
    kind: ClassVar[str] = "gcp_sql_password_validation_policy"
    mapping: ClassVar[Dict[str, Bender]] = {
        "complexity": S("complexity"),
        "disallow_username_substring": S("disallowUsernameSubstring"),
        "enable_password_policy": S("enablePasswordPolicy"),
        "min_length": S("minLength"),
        "password_change_interval": S("passwordChangeInterval"),
        "reuse_interval": S("reuseInterval"),
    }
    complexity: Optional[str] = field(default=None)
    disallow_username_substring: Optional[bool] = field(default=None)
    enable_password_policy: Optional[bool] = field(default=None)
    min_length: Optional[int] = field(default=None)
    password_change_interval: Optional[str] = field(default=None)
    reuse_interval: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlSqlServerAuditConfig:
    """SQL Server audit log configuration (target bucket and intervals)."""
    kind: ClassVar[str] = "gcp_sql_sql_server_audit_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "bucket": S("bucket"),
        "retention_interval": S("retentionInterval"),
        "upload_interval": S("uploadInterval"),
    }
    bucket: Optional[str] = field(default=None)
    retention_interval: Optional[str] = field(default=None)
    upload_interval: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlSettings:
    """User settings of a Cloud SQL instance (the `settings` object of the
    sqladmin v1 API instance resource)."""
    kind: ClassVar[str] = "gcp_sql_settings"
    mapping: ClassVar[Dict[str, Bender]] = {
        "activation_policy": S("activationPolicy"),
        "active_directory_config": S("activeDirectoryConfig", "domain"),
        "authorized_gae_applications": S("authorizedGaeApplications", default=[]),
        "availability_type": S("availabilityType"),
        "backup_configuration": S("backupConfiguration", default={}) >> Bend(GcpSqlBackupConfiguration.mapping),
        "collation": S("collation"),
        "connector_enforcement": S("connectorEnforcement"),
        "crash_safe_replication_enabled": S("crashSafeReplicationEnabled"),
        "data_disk_size_gb": S("dataDiskSizeGb"),
        "data_disk_type": S("dataDiskType"),
        "database_flags": S("databaseFlags", default=[]) >> ForallBend(GcpSqlDatabaseFlags.mapping),
        "database_replication_enabled": S("databaseReplicationEnabled"),
        "deletion_protection_enabled": S("deletionProtectionEnabled"),
        "deny_maintenance_periods": S("denyMaintenancePeriods", default=[])
        >> ForallBend(GcpSqlDenyMaintenancePeriod.mapping),
        "insights_config": S("insightsConfig", default={}) >> Bend(GcpSqlInsightsConfig.mapping),
        "ip_configuration": S("ipConfiguration", default={}) >> Bend(GcpSqlIpConfiguration.mapping),
        "location_preference": S("locationPreference", default={}) >> Bend(GcpSqlLocationPreference.mapping),
        "maintenance_window": S("maintenanceWindow", default={}) >> Bend(GcpSqlMaintenanceWindow.mapping),
        "password_validation_policy": S("passwordValidationPolicy", default={})
        >> Bend(GcpSqlPasswordValidationPolicy.mapping),
        "pricing_plan": S("pricingPlan"),
        "replication_type": S("replicationType"),
        "settings_version": S("settingsVersion"),
        "sql_server_audit_config": S("sqlServerAuditConfig", default={}) >> Bend(GcpSqlSqlServerAuditConfig.mapping),
        "storage_auto_resize": S("storageAutoResize"),
        "storage_auto_resize_limit": S("storageAutoResizeLimit"),
        "tier": S("tier"),
        "time_zone": S("timeZone"),
        "user_labels": S("userLabels"),
    }
    activation_policy: Optional[str] = field(default=None)
    active_directory_config: Optional[str] = field(default=None)
    authorized_gae_applications: Optional[List[str]] = field(default=None)
    availability_type: Optional[str] = field(default=None)
    backup_configuration: Optional[GcpSqlBackupConfiguration] = field(default=None)
    collation: Optional[str] = field(default=None)
    connector_enforcement: Optional[str] = field(default=None)
    crash_safe_replication_enabled: Optional[bool] = field(default=None)
    data_disk_size_gb: Optional[str] = field(default=None)
    data_disk_type: Optional[str] = field(default=None)
    database_flags: Optional[List[GcpSqlDatabaseFlags]] = field(default=None)
    database_replication_enabled: Optional[bool] = field(default=None)
    deletion_protection_enabled: Optional[bool] = field(default=None)
    deny_maintenance_periods: Optional[List[GcpSqlDenyMaintenancePeriod]] = field(default=None)
    insights_config: Optional[GcpSqlInsightsConfig] = field(default=None)
    ip_configuration: Optional[GcpSqlIpConfiguration] = field(default=None)
    location_preference: Optional[GcpSqlLocationPreference] = field(default=None)
    maintenance_window: Optional[GcpSqlMaintenanceWindow] = field(default=None)
    password_validation_policy: Optional[GcpSqlPasswordValidationPolicy] = field(default=None)
    pricing_plan: Optional[str] = field(default=None)
    replication_type: Optional[str] = field(default=None)
    settings_version: Optional[str] = field(default=None)
    sql_server_audit_config: Optional[GcpSqlSqlServerAuditConfig] = field(default=None)
    storage_auto_resize: Optional[bool] = field(default=None)
    storage_auto_resize_limit: Optional[str] = field(default=None)
    tier: Optional[str] = field(default=None)
    time_zone: Optional[str] = field(default=None)
    user_labels: Optional[Dict[str, str]] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlDatabaseInstance(GcpResource):
    """A Cloud SQL database instance, collected via sqladmin `instances.list`.

    Child resources (backup runs, databases, users, operations) are collected
    per instance in `post_process`, using this instance's name and project.
    """

    kind: ClassVar[str] = "gcp_sql_database_instance"
    reference_kinds: ClassVar[ModelReference] = {"predecessors": {"default": ["gcp_ssl_certificate"]}}
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="sqladmin",
        version="v1",
        accessors=["instances"],
        action="list",
        request_parameter={"project": "{project}"},
        request_parameter_in={"project"},
        response_path="items",
        response_regional_sub_path=None,
        required_iam_permissions=["cloudsql.instances.list"],
        mutate_iam_permissions=["cloudsql.instances.update", "cloudsql.instances.delete"],
    )
    # Translates the camelCase API JSON into snake_case attributes below.
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "tags": S("labels", default={}),
        "name": S("name"),
        "ctime": S("createTime"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "available_maintenance_versions": S("availableMaintenanceVersions", default=[]),
        "backend_type": S("backendType"),
        "connection_name": S("connectionName"),
        "create_time": S("createTime"),
        "current_disk_size": S("currentDiskSize"),
        "database_installed_version": S("databaseInstalledVersion"),
        "database_version": S("databaseVersion"),
        # Both encryption sub-objects are flattened to a single key name/version string.
        "disk_encryption_configuration": S("diskEncryptionConfiguration", "kmsKeyName"),
        "disk_encryption_status": S("diskEncryptionStatus", "kmsKeyVersionName"),
        "etag": S("etag"),
        "failover_replica": S("failoverReplica", default={}) >> Bend(GcpSqlFailoverreplica.mapping),
        "gce_zone": S("gceZone"),
        "instance_type": S("instanceType"),
        "ip_addresses": S("ipAddresses", default=[]) >> ForallBend(GcpSqlIpMapping.mapping),
        "ipv6_address": S("ipv6Address"),
        "maintenance_version": S("maintenanceVersion"),
        "master_instance_name": S("masterInstanceName"),
        "max_disk_size": S("maxDiskSize"),
        "on_premises_configuration": S("onPremisesConfiguration", default={})
        >> Bend(GcpSqlOnPremisesConfiguration.mapping),
        "out_of_disk_report": S("outOfDiskReport", default={}) >> Bend(GcpSqlSqlOutOfDiskReport.mapping),
        "project": S("project"),
        "replica_configuration": S("replicaConfiguration", default={}) >> Bend(GcpSqlReplicaConfiguration.mapping),
        "replica_names": S("replicaNames", default=[]),
        "root_password": S("rootPassword"),
        "satisfies_pzs": S("satisfiesPzs"),
        "scheduled_maintenance": S("scheduledMaintenance", default={}) >> Bend(GcpSqlSqlScheduledMaintenance.mapping),
        "secondary_gce_zone": S("secondaryGceZone"),
        "server_ca_cert": S("serverCaCert", default={}) >> Bend(GcpSqlSslCert.mapping),
        "service_account_email_address": S("serviceAccountEmailAddress"),
        "settings": S("settings", default={}) >> Bend(GcpSqlSettings.mapping),
        "sql_database_instance_state": S("state"),
        "suspension_reason": S("suspensionReason", default=[]),
    }
    available_maintenance_versions: Optional[List[str]] = field(default=None)
    backend_type: Optional[str] = field(default=None)
    connection_name: Optional[str] = field(default=None)
    create_time: Optional[datetime] = field(default=None)
    current_disk_size: Optional[str] = field(default=None)
    database_installed_version: Optional[str] = field(default=None)
    database_version: Optional[str] = field(default=None)
    disk_encryption_configuration: Optional[str] = field(default=None)
    disk_encryption_status: Optional[str] = field(default=None)
    etag: Optional[str] = field(default=None)
    failover_replica: Optional[GcpSqlFailoverreplica] = field(default=None)
    gce_zone: Optional[str] = field(default=None)
    instance_type: Optional[str] = field(default=None)
    ip_addresses: Optional[List[GcpSqlIpMapping]] = field(default=None)
    ipv6_address: Optional[str] = field(default=None)
    maintenance_version: Optional[str] = field(default=None)
    master_instance_name: Optional[str] = field(default=None)
    max_disk_size: Optional[str] = field(default=None)
    on_premises_configuration: Optional[GcpSqlOnPremisesConfiguration] = field(default=None)
    out_of_disk_report: Optional[GcpSqlSqlOutOfDiskReport] = field(default=None)
    project: Optional[str] = field(default=None)
    replica_configuration: Optional[GcpSqlReplicaConfiguration] = field(default=None)
    replica_names: Optional[List[str]] = field(default=None)
    root_password: Optional[str] = field(default=None)
    satisfies_pzs: Optional[bool] = field(default=None)
    scheduled_maintenance: Optional[GcpSqlSqlScheduledMaintenance] = field(default=None)
    secondary_gce_zone: Optional[str] = field(default=None)
    server_ca_cert: Optional[GcpSqlSslCert] = field(default=None)
    service_account_email_address: Optional[str] = field(default=None)
    settings: Optional[GcpSqlSettings] = field(default=None)
    sql_database_instance_state: Optional[str] = field(default=None)
    suspension_reason: Optional[List[str]] = field(default=None)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Link the SSL certificate of the server CA cert as a predecessor of this instance."""
        if cert := self.server_ca_cert:
            if cert.self_link:
                # reverse=True: the edge points from the certificate to this instance
                builder.add_edge(self, reverse=True, clazz=GcpSslCertificate, link=cert.self_link)

    def post_process(self, graph_builder: GraphBuilder, source: Json) -> None:
        """Collect per-instance child resources (backup runs, databases, users, operations)."""
        classes: List[Type[GcpResource]] = [GcpSqlBackupRun, GcpSqlDatabase, GcpSqlUser, GcpSqlOperation]
        for cls in classes:
            if spec := cls.api_spec:
                # Each child API spec is parameterized with this instance's name and project.
                items = graph_builder.client.list(spec, instance=self.name, project=self.project)
                cls.collect(items, graph_builder)

    @classmethod
    def called_collect_apis(cls) -> List[GcpApiSpec]:
        """Return all API specs used during collection, including per-instance child APIs."""
        return [
            cls.api_spec,
            GcpSqlBackupRun.api_spec,
            GcpSqlDatabase.api_spec,
            GcpSqlUser.api_spec,
            GcpSqlOperation.api_spec,
        ]
@define(eq=False, slots=False)
class GcpSqlCsvexportoptions:
    """CSV export options of a Cloud SQL export context (JSON key `csvExportOptions`)."""

    kind: ClassVar[str] = "gcp_sql_csvexportoptions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "escape_character": S("escapeCharacter"),
        "fields_terminated_by": S("fieldsTerminatedBy"),
        "lines_terminated_by": S("linesTerminatedBy"),
        "quote_character": S("quoteCharacter"),
        "select_query": S("selectQuery"),
    }
    escape_character: Optional[str] = field(default=None)
    fields_terminated_by: Optional[str] = field(default=None)
    lines_terminated_by: Optional[str] = field(default=None)
    quote_character: Optional[str] = field(default=None)
    select_query: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlMysqlexportoptions:
    """MySQL-specific export options (JSON key `mysqlExportOptions`)."""

    kind: ClassVar[str] = "gcp_sql_mysqlexportoptions"
    mapping: ClassVar[Dict[str, Bender]] = {"master_data": S("masterData")}
    master_data: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlSqlexportoptions:
    """SQL-dump export options of an export context (JSON key `sqlExportOptions`)."""

    kind: ClassVar[str] = "gcp_sql_sqlexportoptions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "mysql_export_options": S("mysqlExportOptions", default={}) >> Bend(GcpSqlMysqlexportoptions.mapping),
        "schema_only": S("schemaOnly"),
        "tables": S("tables", default=[]),
    }
    mysql_export_options: Optional[GcpSqlMysqlexportoptions] = field(default=None)
    schema_only: Optional[bool] = field(default=None)
    tables: Optional[List[str]] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlExportContext:
    """Export context of a Cloud SQL operation (JSON key `exportContext`)."""

    kind: ClassVar[str] = "gcp_sql_export_context"
    mapping: ClassVar[Dict[str, Bender]] = {
        "csv_export_options": S("csvExportOptions", default={}) >> Bend(GcpSqlCsvexportoptions.mapping),
        "databases": S("databases", default=[]),
        "file_type": S("fileType"),
        "offload": S("offload"),
        "sql_export_options": S("sqlExportOptions", default={}) >> Bend(GcpSqlSqlexportoptions.mapping),
        "uri": S("uri"),
    }
    csv_export_options: Optional[GcpSqlCsvexportoptions] = field(default=None)
    databases: Optional[List[str]] = field(default=None)
    file_type: Optional[str] = field(default=None)
    offload: Optional[bool] = field(default=None)
    sql_export_options: Optional[GcpSqlSqlexportoptions] = field(default=None)
    uri: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlEncryptionoptions:
    """Encryption options of a BAK import (JSON key `encryptionOptions`)."""

    kind: ClassVar[str] = "gcp_sql_encryptionoptions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "cert_path": S("certPath"),
        "pvk_password": S("pvkPassword"),
        "pvk_path": S("pvkPath"),
    }
    cert_path: Optional[str] = field(default=None)
    pvk_password: Optional[str] = field(default=None)
    pvk_path: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlBakimportoptions:
    """SQL Server BAK import options of an import context (JSON key `bakImportOptions`)."""

    kind: ClassVar[str] = "gcp_sql_bakimportoptions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "encryption_options": S("encryptionOptions", default={}) >> Bend(GcpSqlEncryptionoptions.mapping)
    }
    encryption_options: Optional[GcpSqlEncryptionoptions] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlCsvimportoptions:
    """CSV import options of an import context (JSON key `csvImportOptions`)."""

    kind: ClassVar[str] = "gcp_sql_csvimportoptions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "columns": S("columns", default=[]),
        "escape_character": S("escapeCharacter"),
        "fields_terminated_by": S("fieldsTerminatedBy"),
        "lines_terminated_by": S("linesTerminatedBy"),
        "quote_character": S("quoteCharacter"),
        "table": S("table"),
    }
    columns: Optional[List[str]] = field(default=None)
    escape_character: Optional[str] = field(default=None)
    fields_terminated_by: Optional[str] = field(default=None)
    lines_terminated_by: Optional[str] = field(default=None)
    quote_character: Optional[str] = field(default=None)
    table: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlImportContext:
    """Import context of a Cloud SQL operation (JSON key `importContext`)."""

    kind: ClassVar[str] = "gcp_sql_import_context"
    mapping: ClassVar[Dict[str, Bender]] = {
        "bak_import_options": S("bakImportOptions", default={}) >> Bend(GcpSqlBakimportoptions.mapping),
        "csv_import_options": S("csvImportOptions", default={}) >> Bend(GcpSqlCsvimportoptions.mapping),
        "database": S("database"),
        "file_type": S("fileType"),
        "import_user": S("importUser"),
        "uri": S("uri"),
    }
    bak_import_options: Optional[GcpSqlBakimportoptions] = field(default=None)
    csv_import_options: Optional[GcpSqlCsvimportoptions] = field(default=None)
    database: Optional[str] = field(default=None)
    file_type: Optional[str] = field(default=None)
    import_user: Optional[str] = field(default=None)
    uri: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlOperation(GcpResource):
    """A Cloud SQL operation, collected per instance via sqladmin `operations.list`.

    Collected from `GcpSqlDatabaseInstance.post_process`, which supplies both
    `instance` and `project` keyword arguments to `client.list`.
    """

    kind: ClassVar[str] = "gcp_sql_operation"
    reference_kinds: ClassVar[ModelReference] = {"predecessors": {"default": ["gcp_sql_database_instance"]}}
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="sqladmin",
        version="v1",
        accessors=["operations"],
        action="list",
        request_parameter={"instance": "{instance}", "project": "{project}"},
        # Fix: "instance" must be declared here as well, otherwise the
        # "{instance}" placeholder above never receives the value passed in by
        # GcpSqlDatabaseInstance.post_process. GcpSqlUser declares the same
        # pair of parameters for its per-instance list call.
        request_parameter_in={"instance", "project"},
        response_path="items",
        response_regional_sub_path=None,
        required_iam_permissions=["cloudsql.instances.get"],
        mutate_iam_permissions=["cloudsql.instances.update", "cloudsql.instances.delete"],
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "tags": S("labels", default={}),
        "name": S("name"),
        # NOTE(review): operations also expose "insertTime" (mapped below);
        # confirm "creationTimestamp" is actually present in this payload.
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "backup_context": S("backupContext", "backupId"),
        "end_time": S("endTime"),
        "sql_operation_errors": S("error", "errors", default=[]) >> ForallBend(GcpSqlOperationError.mapping),
        "export_context": S("exportContext", default={}) >> Bend(GcpSqlExportContext.mapping),
        "import_context": S("importContext", default={}) >> Bend(GcpSqlImportContext.mapping),
        "insert_time": S("insertTime"),
        "operation_type": S("operationType"),
        "start_time": S("startTime"),
        "status": S("status"),
        "target_id": S("targetId"),
        "target_link": S("targetLink"),
        "target_project": S("targetProject"),
        "user": S("user"),
    }
    backup_context: Optional[str] = field(default=None)
    end_time: Optional[datetime] = field(default=None)
    sql_operation_errors: List[GcpSqlOperationError] = field(factory=list)
    export_context: Optional[GcpSqlExportContext] = field(default=None)
    import_context: Optional[GcpSqlImportContext] = field(default=None)
    insert_time: Optional[datetime] = field(default=None)
    operation_type: Optional[str] = field(default=None)
    start_time: Optional[datetime] = field(default=None)
    status: Optional[str] = field(default=None)
    target_id: Optional[str] = field(default=None)
    target_link: Optional[str] = field(default=None)
    target_project: Optional[str] = field(default=None)
    user: Optional[str] = field(default=None)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Link this operation to its target database instance (matched by name)."""
        if self.target_id:
            builder.add_edge(self, reverse=True, clazz=GcpSqlDatabaseInstance, name=self.target_id)
@define(eq=False, slots=False)
class GcpSqlPasswordStatus:
    """Password status of a SQL user's validation policy (JSON key `status`)."""

    kind: ClassVar[str] = "gcp_sql_password_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "locked": S("locked"),
        "password_expiration_time": S("passwordExpirationTime"),
    }
    locked: Optional[bool] = field(default=None)
    password_expiration_time: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlUserPasswordValidationPolicy:
    """Per-user password validation policy of a SQL user (JSON key `passwordPolicy`)."""

    kind: ClassVar[str] = "gcp_sql_user_password_validation_policy"
    mapping: ClassVar[Dict[str, Bender]] = {
        "allowed_failed_attempts": S("allowedFailedAttempts"),
        "enable_failed_attempts_check": S("enableFailedAttemptsCheck"),
        "enable_password_verification": S("enablePasswordVerification"),
        "password_expiration_duration": S("passwordExpirationDuration"),
        "status": S("status", default={}) >> Bend(GcpSqlPasswordStatus.mapping),
    }
    allowed_failed_attempts: Optional[int] = field(default=None)
    enable_failed_attempts_check: Optional[bool] = field(default=None)
    enable_password_verification: Optional[bool] = field(default=None)
    password_expiration_duration: Optional[str] = field(default=None)
    status: Optional[GcpSqlPasswordStatus] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlSqlServerUserDetails:
    """SQL Server-specific details of a SQL user (JSON key `sqlserverUserDetails`)."""

    kind: ClassVar[str] = "gcp_sql_sql_server_user_details"
    mapping: ClassVar[Dict[str, Bender]] = {"disabled": S("disabled"), "server_roles": S("serverRoles", default=[])}
    disabled: Optional[bool] = field(default=None)
    server_roles: Optional[List[str]] = field(default=None)
@define(eq=False, slots=False)
class GcpSqlUser(GcpResource):
    """A Cloud SQL user account, collected per instance via GcpSqlDatabaseInstance."""

    # collected via GcpSqlDatabaseInstance
    kind: ClassVar[str] = "gcp_sql_user"
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="sqladmin",
        version="v1",
        accessors=["users"],
        action="list",
        request_parameter={"instance": "{instance}", "project": "{project}"},
        request_parameter_in={"instance", "project"},
        response_path="items",
        response_regional_sub_path=None,
        required_iam_permissions=["cloudsql.users.list"],
        mutate_iam_permissions=["cloudsql.users.update", "cloudsql.users.delete"],
    )
    reference_kinds: ClassVar[ModelReference] = {"predecessors": {"default": ["gcp_sql_database_instance"]}}
    mapping: ClassVar[Dict[str, Bender]] = {
        # Users may be anonymous (no name) - synthesize a stable id from the host in that case.
        "id": S("name").or_else(K("(anonymous)@") + S("host", default="localhost")),
        "tags": S("labels", default={}),
        "name": S("name", default="(anonymous)"),
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "dual_password_type": S("dualPasswordType"),
        "etag": S("etag"),
        "host": S("host", default="localhost"),
        "instance": S("instance"),
        "password": S("password"),
        "password_policy": S("passwordPolicy", default={}) >> Bend(GcpSqlUserPasswordValidationPolicy.mapping),
        "project": S("project"),
        "sqlserver_user_details": S("sqlserverUserDetails", default={}) >> Bend(GcpSqlSqlServerUserDetails.mapping),
        "type": S("type"),
    }
    dual_password_type: Optional[str] = field(default=None)
    etag: Optional[str] = field(default=None)
    host: Optional[str] = field(default=None)
    instance: Optional[str] = field(default=None)
    password: Optional[str] = field(default=None)
    password_policy: Optional[GcpSqlUserPasswordValidationPolicy] = field(default=None)
    project: Optional[str] = field(default=None)
    sqlserver_user_details: Optional[GcpSqlSqlServerUserDetails] = field(default=None)
    type: Optional[str] = field(default=None)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Link this user to the database instance it belongs to (matched by name).

        Fix: the instance name was checked but never passed to `add_edge`, so the
        edge was not filtered to the owning instance - unlike
        GcpSqlOperation.connect_in_graph, which passes `name=self.target_id`.
        """
        if self.instance:
            builder.add_edge(self, reverse=True, clazz=GcpSqlDatabaseInstance, name=self.instance)
# Top-level resource kinds exported by this module; nested kinds (backup runs,
# databases, users, operations) are collected via GcpSqlDatabaseInstance.
# Fix: stripped dataset-join residue that was fused onto this line and made it
# syntactically invalid.
resources = [GcpSqlDatabaseInstance]
from datetime import datetime
from typing import ClassVar, Dict, Optional, List
from attr import define, field
from resoto_plugin_gcp.gcp_client import GcpApiSpec
from resoto_plugin_gcp.resources.base import GcpResource, GcpDeprecationStatus, GraphBuilder
from resotolib.baseresources import ModelReference
from resotolib.json_bender import Bender, S, Bend, ForallBend, MapDict
from resotolib.types import Json
# This service is called Google Kubernetes Engine in the docs
# https://cloud.google.com/kubernetes-engine/docs
@define(eq=False, slots=False)
class GcpContainerCloudRunConfig:
    """Cloud Run addon configuration of a GKE cluster (JSON key `cloudRunConfig`)."""

    kind: ClassVar[str] = "gcp_container_cloud_run_config"
    mapping: ClassVar[Dict[str, Bender]] = {"disabled": S("disabled"), "load_balancer_type": S("loadBalancerType")}
    disabled: Optional[bool] = field(default=None)
    load_balancer_type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerAddonsConfig:
    """GKE cluster addons configuration.

    Most addon sub-objects carry a single `enabled`/`disabled` flag and are
    flattened to a plain boolean by the two-argument `S(...)` selectors below.
    """

    kind: ClassVar[str] = "gcp_container_addons_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "cloud_run_config": S("cloudRunConfig", default={}) >> Bend(GcpContainerCloudRunConfig.mapping),
        "config_connector_config": S("configConnectorConfig", "enabled"),
        "dns_cache_config": S("dnsCacheConfig", "enabled"),
        "gce_persistent_disk_csi_driver_config": S("gcePersistentDiskCsiDriverConfig", "enabled"),
        "gcp_filestore_csi_driver_config": S("gcpFilestoreCsiDriverConfig", "enabled"),
        "gke_backup_agent_config": S("gkeBackupAgentConfig", "enabled"),
        # NOTE: the following four flatten a "disabled" flag, not "enabled".
        "horizontal_pod_autoscaling": S("horizontalPodAutoscaling", "disabled"),
        "http_load_balancing": S("httpLoadBalancing", "disabled"),
        "kubernetes_dashboard": S("kubernetesDashboard", "disabled"),
        "network_policy_config": S("networkPolicyConfig", "disabled"),
    }
    cloud_run_config: Optional[GcpContainerCloudRunConfig] = field(default=None)
    config_connector_config: Optional[bool] = field(default=None)
    dns_cache_config: Optional[bool] = field(default=None)
    gce_persistent_disk_csi_driver_config: Optional[bool] = field(default=None)
    gcp_filestore_csi_driver_config: Optional[bool] = field(default=None)
    gke_backup_agent_config: Optional[bool] = field(default=None)
    horizontal_pod_autoscaling: Optional[bool] = field(default=None)
    http_load_balancing: Optional[bool] = field(default=None)
    kubernetes_dashboard: Optional[bool] = field(default=None)
    network_policy_config: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerAuthenticatorGroupsConfig:
    """GKE authenticator groups configuration (JSON key `authenticatorGroupsConfig`)."""

    kind: ClassVar[str] = "gcp_container_authenticator_groups_config"
    mapping: ClassVar[Dict[str, Bender]] = {"enabled": S("enabled"), "security_group": S("securityGroup")}
    enabled: Optional[bool] = field(default=None)
    security_group: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerAutoUpgradeOptions:
    """Auto-upgrade options of a node pool's management settings (JSON key `upgradeOptions`)."""

    kind: ClassVar[str] = "gcp_container_auto_upgrade_options"
    mapping: ClassVar[Dict[str, Bender]] = {
        "auto_upgrade_start_time": S("autoUpgradeStartTime"),
        "description": S("description"),
    }
    auto_upgrade_start_time: Optional[datetime] = field(default=None)
    description: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodeManagement:
    """Node management settings (auto repair/upgrade) of a node pool (JSON key `management`)."""

    kind: ClassVar[str] = "gcp_container_node_management"
    mapping: ClassVar[Dict[str, Bender]] = {
        "auto_repair": S("autoRepair"),
        "auto_upgrade": S("autoUpgrade"),
        "upgrade_options": S("upgradeOptions", default={}) >> Bend(GcpContainerAutoUpgradeOptions.mapping),
    }
    auto_repair: Optional[bool] = field(default=None)
    auto_upgrade: Optional[bool] = field(default=None)
    upgrade_options: Optional[GcpContainerAutoUpgradeOptions] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerShieldedInstanceConfig:
    """Shielded instance settings of GKE nodes (JSON key `shieldedInstanceConfig`)."""

    kind: ClassVar[str] = "gcp_container_shielded_instance_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "enable_integrity_monitoring": S("enableIntegrityMonitoring"),
        "enable_secure_boot": S("enableSecureBoot"),
    }
    enable_integrity_monitoring: Optional[bool] = field(default=None)
    enable_secure_boot: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerStandardRolloutPolicy:
    """Standard rollout policy of blue-green upgrades (JSON key `standardRolloutPolicy`)."""

    kind: ClassVar[str] = "gcp_container_standard_rollout_policy"
    mapping: ClassVar[Dict[str, Bender]] = {
        "batch_node_count": S("batchNodeCount"),
        "batch_percentage": S("batchPercentage"),
        "batch_soak_duration": S("batchSoakDuration"),
    }
    batch_node_count: Optional[int] = field(default=None)
    batch_percentage: Optional[float] = field(default=None)
    batch_soak_duration: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerBlueGreenSettings:
    """Blue-green upgrade settings of a node pool (JSON key `blueGreenSettings`)."""

    kind: ClassVar[str] = "gcp_container_blue_green_settings"
    mapping: ClassVar[Dict[str, Bender]] = {
        "node_pool_soak_duration": S("nodePoolSoakDuration"),
        "standard_rollout_policy": S("standardRolloutPolicy", default={})
        >> Bend(GcpContainerStandardRolloutPolicy.mapping),
    }
    node_pool_soak_duration: Optional[str] = field(default=None)
    standard_rollout_policy: Optional[GcpContainerStandardRolloutPolicy] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerUpgradeSettings:
    """Node pool upgrade settings (JSON key `upgradeSettings`)."""

    kind: ClassVar[str] = "gcp_container_upgrade_settings"
    mapping: ClassVar[Dict[str, Bender]] = {
        "blue_green_settings": S("blueGreenSettings", default={}) >> Bend(GcpContainerBlueGreenSettings.mapping),
        "max_surge": S("maxSurge"),
        "max_unavailable": S("maxUnavailable"),
        "strategy": S("strategy"),
    }
    blue_green_settings: Optional[GcpContainerBlueGreenSettings] = field(default=None)
    max_surge: Optional[int] = field(default=None)
    max_unavailable: Optional[int] = field(default=None)
    strategy: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerAutoprovisioningNodePoolDefaults:
    """Defaults applied to auto-provisioned node pools (JSON key `autoprovisioningNodePoolDefaults`)."""

    kind: ClassVar[str] = "gcp_container_autoprovisioning_node_pool_defaults"
    mapping: ClassVar[Dict[str, Bender]] = {
        "boot_disk_kms_key": S("bootDiskKmsKey"),
        "disk_size_gb": S("diskSizeGb"),
        "disk_type": S("diskType"),
        "image_type": S("imageType"),
        "management": S("management", default={}) >> Bend(GcpContainerNodeManagement.mapping),
        "min_cpu_platform": S("minCpuPlatform"),
        "oauth_scopes": S("oauthScopes", default=[]),
        "service_account": S("serviceAccount"),
        "shielded_instance_config": S("shieldedInstanceConfig", default={})
        >> Bend(GcpContainerShieldedInstanceConfig.mapping),
        "upgrade_settings": S("upgradeSettings", default={}) >> Bend(GcpContainerUpgradeSettings.mapping),
    }
    boot_disk_kms_key: Optional[str] = field(default=None)
    disk_size_gb: Optional[int] = field(default=None)
    disk_type: Optional[str] = field(default=None)
    image_type: Optional[str] = field(default=None)
    management: Optional[GcpContainerNodeManagement] = field(default=None)
    min_cpu_platform: Optional[str] = field(default=None)
    oauth_scopes: Optional[List[str]] = field(default=None)
    service_account: Optional[str] = field(default=None)
    shielded_instance_config: Optional[GcpContainerShieldedInstanceConfig] = field(default=None)
    upgrade_settings: Optional[GcpContainerUpgradeSettings] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerResourceLimit:
    """A min/max limit for one resource type in cluster autoscaling (JSON list `resourceLimits`)."""

    kind: ClassVar[str] = "gcp_container_resource_limit"
    mapping: ClassVar[Dict[str, Bender]] = {
        "maximum": S("maximum"),
        "minimum": S("minimum"),
        "resource_type": S("resourceType"),
    }
    maximum: Optional[str] = field(default=None)
    minimum: Optional[str] = field(default=None)
    resource_type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerClusterAutoscaling:
    """Cluster-level autoscaling configuration (JSON key `autoscaling`)."""

    kind: ClassVar[str] = "gcp_container_cluster_autoscaling"
    mapping: ClassVar[Dict[str, Bender]] = {
        "autoprovisioning_locations": S("autoprovisioningLocations", default=[]),
        "autoprovisioning_node_pool_defaults": S("autoprovisioningNodePoolDefaults", default={})
        >> Bend(GcpContainerAutoprovisioningNodePoolDefaults.mapping),
        "autoscaling_profile": S("autoscalingProfile"),
        "enable_node_autoprovisioning": S("enableNodeAutoprovisioning"),
        "resource_limits": S("resourceLimits", default=[]) >> ForallBend(GcpContainerResourceLimit.mapping),
    }
    autoprovisioning_locations: Optional[List[str]] = field(default=None)
    autoprovisioning_node_pool_defaults: Optional[GcpContainerAutoprovisioningNodePoolDefaults] = field(default=None)
    autoscaling_profile: Optional[str] = field(default=None)
    enable_node_autoprovisioning: Optional[bool] = field(default=None)
    resource_limits: Optional[List[GcpContainerResourceLimit]] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerBinaryAuthorization:
    """Binary Authorization settings of a cluster (JSON key `binaryAuthorization`)."""

    kind: ClassVar[str] = "gcp_container_binary_authorization"
    mapping: ClassVar[Dict[str, Bender]] = {"enabled": S("enabled"), "evaluation_mode": S("evaluationMode")}
    enabled: Optional[bool] = field(default=None)
    evaluation_mode: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerStatusCondition:
    """A status condition (code + message) reported for a cluster or node pool."""

    kind: ClassVar[str] = "gcp_container_status_condition"
    mapping: ClassVar[Dict[str, Bender]] = {
        "canonical_code": S("canonicalCode"),
        "code": S("code"),
        "message": S("message"),
    }
    canonical_code: Optional[str] = field(default=None)
    code: Optional[str] = field(default=None)
    message: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerDatabaseEncryption:
    """etcd database encryption settings of a cluster (JSON key `databaseEncryption`)."""

    kind: ClassVar[str] = "gcp_container_database_encryption"
    mapping: ClassVar[Dict[str, Bender]] = {"key_name": S("keyName"), "state": S("state")}
    key_name: Optional[str] = field(default=None)
    state: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerIPAllocationPolicy:
    """IP allocation policy of a cluster (JSON key `ipAllocationPolicy`)."""

    kind: ClassVar[str] = "gcp_container_ip_allocation_policy"
    mapping: ClassVar[Dict[str, Bender]] = {
        "cluster_ipv4_cidr": S("clusterIpv4Cidr"),
        "cluster_ipv4_cidr_block": S("clusterIpv4CidrBlock"),
        "cluster_secondary_range_name": S("clusterSecondaryRangeName"),
        "create_subnetwork": S("createSubnetwork"),
        "ipv6_access_type": S("ipv6AccessType"),
        "node_ipv4_cidr": S("nodeIpv4Cidr"),
        "node_ipv4_cidr_block": S("nodeIpv4CidrBlock"),
        "services_ipv4_cidr": S("servicesIpv4Cidr"),
        "services_ipv4_cidr_block": S("servicesIpv4CidrBlock"),
        "services_secondary_range_name": S("servicesSecondaryRangeName"),
        "stack_type": S("stackType"),
        "subnetwork_name": S("subnetworkName"),
        "tpu_ipv4_cidr_block": S("tpuIpv4CidrBlock"),
        "use_ip_aliases": S("useIpAliases"),
        "use_routes": S("useRoutes"),
    }
    cluster_ipv4_cidr: Optional[str] = field(default=None)
    cluster_ipv4_cidr_block: Optional[str] = field(default=None)
    cluster_secondary_range_name: Optional[str] = field(default=None)
    create_subnetwork: Optional[bool] = field(default=None)
    ipv6_access_type: Optional[str] = field(default=None)
    node_ipv4_cidr: Optional[str] = field(default=None)
    node_ipv4_cidr_block: Optional[str] = field(default=None)
    services_ipv4_cidr: Optional[str] = field(default=None)
    services_ipv4_cidr_block: Optional[str] = field(default=None)
    services_secondary_range_name: Optional[str] = field(default=None)
    stack_type: Optional[str] = field(default=None)
    subnetwork_name: Optional[str] = field(default=None)
    tpu_ipv4_cidr_block: Optional[str] = field(default=None)
    use_ip_aliases: Optional[bool] = field(default=None)
    use_routes: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerLoggingComponentConfig:
    """Enabled components of the cluster logging configuration (JSON key `componentConfig`)."""

    kind: ClassVar[str] = "gcp_container_logging_component_config"
    mapping: ClassVar[Dict[str, Bender]] = {"enable_components": S("enableComponents", default=[])}
    enable_components: Optional[List[str]] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerLoggingConfig:
    """Cluster logging configuration (JSON key `loggingConfig`)."""

    kind: ClassVar[str] = "gcp_container_logging_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "component_config": S("componentConfig", default={}) >> Bend(GcpContainerLoggingComponentConfig.mapping)
    }
    component_config: Optional[GcpContainerLoggingComponentConfig] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerDailyMaintenanceWindow:
    """Daily maintenance window of a cluster (JSON key `dailyMaintenanceWindow`)."""

    kind: ClassVar[str] = "gcp_container_daily_maintenance_window"
    mapping: ClassVar[Dict[str, Bender]] = {"duration": S("duration"), "start_time": S("startTime")}
    duration: Optional[str] = field(default=None)
    start_time: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerTimeWindow:
    """A start/end time window, used for maintenance exclusions and recurring windows."""

    kind: ClassVar[str] = "gcp_container_time_window"
    mapping: ClassVar[Dict[str, Bender]] = {
        "end_time": S("endTime"),
        # The nested options object is flattened to its "scope" value.
        "maintenance_exclusion_options": S("maintenanceExclusionOptions", "scope"),
        "start_time": S("startTime"),
    }
    end_time: Optional[datetime] = field(default=None)
    maintenance_exclusion_options: Optional[str] = field(default=None)
    start_time: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerRecurringTimeWindow:
    """A recurring time window (recurrence rule + base window) for maintenance."""

    kind: ClassVar[str] = "gcp_container_recurring_time_window"
    mapping: ClassVar[Dict[str, Bender]] = {
        "recurrence": S("recurrence"),
        "window": S("window", default={}) >> Bend(GcpContainerTimeWindow.mapping),
    }
    recurrence: Optional[str] = field(default=None)
    window: Optional[GcpContainerTimeWindow] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerMaintenanceWindow:
    """Cluster maintenance window (daily or recurring) plus named exclusion windows."""

    kind: ClassVar[str] = "gcp_container_maintenance_window"
    mapping: ClassVar[Dict[str, Bender]] = {
        "daily_maintenance_window": S("dailyMaintenanceWindow", default={})
        >> Bend(GcpContainerDailyMaintenanceWindow.mapping),
        # maintenanceExclusions is a dict of name -> time window; only the values are bent.
        "maintenance_exclusions": S("maintenanceExclusions", default={})
        >> MapDict(value_bender=Bend(GcpContainerTimeWindow.mapping)),
        "recurring_window": S("recurringWindow", default={}) >> Bend(GcpContainerRecurringTimeWindow.mapping),
    }
    daily_maintenance_window: Optional[GcpContainerDailyMaintenanceWindow] = field(default=None)
    maintenance_exclusions: Optional[Dict[str, GcpContainerTimeWindow]] = field(default=None)
    recurring_window: Optional[GcpContainerRecurringTimeWindow] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerMaintenancePolicy:
    """Cluster maintenance policy (JSON key `maintenancePolicy`)."""

    kind: ClassVar[str] = "gcp_container_maintenance_policy"
    mapping: ClassVar[Dict[str, Bender]] = {
        "resource_version": S("resourceVersion"),
        "window": S("window", default={}) >> Bend(GcpContainerMaintenanceWindow.mapping),
    }
    resource_version: Optional[str] = field(default=None)
    window: Optional[GcpContainerMaintenanceWindow] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerMasterAuth:
    """Master authentication information of a cluster (JSON key `masterAuth`).

    Carries credential material (certificates, password) as returned by the API.
    """

    kind: ClassVar[str] = "gcp_container_master_auth"
    mapping: ClassVar[Dict[str, Bender]] = {
        "client_certificate": S("clientCertificate"),
        # Flattened to the single "issueClientCertificate" flag.
        "client_certificate_config": S("clientCertificateConfig", "issueClientCertificate"),
        "client_key": S("clientKey"),
        "cluster_ca_certificate": S("clusterCaCertificate"),
        "password": S("password"),
        "username": S("username"),
    }
    client_certificate: Optional[str] = field(default=None)
    client_certificate_config: Optional[bool] = field(default=None)
    client_key: Optional[str] = field(default=None)
    cluster_ca_certificate: Optional[str] = field(default=None)
    password: Optional[str] = field(default=None)
    username: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerCidrBlock:
    """A named CIDR block of the master authorized networks list."""

    kind: ClassVar[str] = "gcp_container_cidr_block"
    mapping: ClassVar[Dict[str, Bender]] = {"cidr_block": S("cidrBlock"), "display_name": S("displayName")}
    cidr_block: Optional[str] = field(default=None)
    display_name: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerMasterAuthorizedNetworksConfig:
    """Master authorized networks configuration (JSON key `masterAuthorizedNetworksConfig`)."""

    kind: ClassVar[str] = "gcp_container_master_authorized_networks_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "cidr_blocks": S("cidrBlocks", default=[]) >> ForallBend(GcpContainerCidrBlock.mapping),
        "enabled": S("enabled"),
    }
    cidr_blocks: Optional[List[GcpContainerCidrBlock]] = field(default=None)
    enabled: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerMonitoringComponentConfig:
    """Enabled components of the cluster monitoring configuration (JSON key `componentConfig`)."""

    kind: ClassVar[str] = "gcp_container_monitoring_component_config"
    mapping: ClassVar[Dict[str, Bender]] = {"enable_components": S("enableComponents", default=[])}
    enable_components: Optional[List[str]] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerMonitoringConfig:
    """Cluster monitoring configuration (JSON key `monitoringConfig`)."""

    kind: ClassVar[str] = "gcp_container_monitoring_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "component_config": S("componentConfig", default={}) >> Bend(GcpContainerMonitoringComponentConfig.mapping),
        # Flattened to the nested "enabled" flag.
        "managed_prometheus_config": S("managedPrometheusConfig", "enabled"),
    }
    component_config: Optional[GcpContainerMonitoringComponentConfig] = field(default=None)
    managed_prometheus_config: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerDNSConfig:
    """Cluster DNS settings (`dnsConfig`): provider, domain and scope."""
    kind: ClassVar[str] = "gcp_container_dns_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "cluster_dns": S("clusterDns"),
        "cluster_dns_domain": S("clusterDnsDomain"),
        "cluster_dns_scope": S("clusterDnsScope"),
    }
    cluster_dns: Optional[str] = field(default=None)
    cluster_dns_domain: Optional[str] = field(default=None)
    cluster_dns_scope: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNetworkConfig:
    """Cluster-level `networkConfig`: datapath, SNAT, DNS, network/subnetwork and feature flags."""
    kind: ClassVar[str] = "gcp_container_network_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "datapath_provider": S("datapathProvider"),
        # defaultSnatStatus.disabled is flattened to a bool (True means SNAT is disabled).
        "default_snat_status": S("defaultSnatStatus", "disabled"),
        "dns_config": S("dnsConfig", default={}) >> Bend(GcpContainerDNSConfig.mapping),
        "enable_intra_node_visibility": S("enableIntraNodeVisibility"),
        "enable_l4ilb_subsetting": S("enableL4ilbSubsetting"),
        "network": S("network"),
        "private_ipv6_google_access": S("privateIpv6GoogleAccess"),
        "service_external_ips_config": S("serviceExternalIpsConfig", "enabled"),
        "subnetwork": S("subnetwork"),
    }
    datapath_provider: Optional[str] = field(default=None)
    default_snat_status: Optional[bool] = field(default=None)
    dns_config: Optional[GcpContainerDNSConfig] = field(default=None)
    enable_intra_node_visibility: Optional[bool] = field(default=None)
    enable_l4ilb_subsetting: Optional[bool] = field(default=None)
    network: Optional[str] = field(default=None)
    private_ipv6_google_access: Optional[str] = field(default=None)
    service_external_ips_config: Optional[bool] = field(default=None)
    subnetwork: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNetworkPolicy:
    """Kubernetes network-policy addon state (`networkPolicy`): enabled flag and provider name."""
    kind: ClassVar[str] = "gcp_container_network_policy"
    mapping: ClassVar[Dict[str, Bender]] = {"enabled": S("enabled"), "provider": S("provider")}
    enabled: Optional[bool] = field(default=None)
    provider: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerGPUSharingConfig:
    """GPU sharing settings for an accelerator (`gpuSharingConfig`)."""
    kind: ClassVar[str] = "gcp_container_gpu_sharing_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "gpu_sharing_strategy": S("gpuSharingStrategy"),
        "max_shared_clients_per_gpu": S("maxSharedClientsPerGpu"),
    }
    gpu_sharing_strategy: Optional[str] = field(default=None)
    # Kept as str: the API serializes this int64 value as a string.
    max_shared_clients_per_gpu: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerAcceleratorConfig:
    """One accelerator (GPU) attachment on a node: type, count, partitioning and sharing."""
    kind: ClassVar[str] = "gcp_container_accelerator_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "accelerator_count": S("acceleratorCount"),
        "accelerator_type": S("acceleratorType"),
        "gpu_partition_size": S("gpuPartitionSize"),
        "gpu_sharing_config": S("gpuSharingConfig", default={}) >> Bend(GcpContainerGPUSharingConfig.mapping),
    }
    # Kept as str: the API serializes this int64 value as a string.
    accelerator_count: Optional[str] = field(default=None)
    accelerator_type: Optional[str] = field(default=None)
    gpu_partition_size: Optional[str] = field(default=None)
    gpu_sharing_config: Optional[GcpContainerGPUSharingConfig] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodeKubeletConfig:
    """Per-node kubelet settings (`kubeletConfig`): CPU CFS quota, CPU manager policy, pid limits."""
    kind: ClassVar[str] = "gcp_container_node_kubelet_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "cpu_cfs_quota": S("cpuCfsQuota"),
        "cpu_cfs_quota_period": S("cpuCfsQuotaPeriod"),
        "cpu_manager_policy": S("cpuManagerPolicy"),
        "pod_pids_limit": S("podPidsLimit"),
    }
    cpu_cfs_quota: Optional[bool] = field(default=None)
    cpu_cfs_quota_period: Optional[str] = field(default=None)
    cpu_manager_policy: Optional[str] = field(default=None)
    # Kept as str: the API serializes this int64 value as a string.
    pod_pids_limit: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerLinuxNodeConfig:
    """Linux kernel parameters applied to nodes (`linuxNodeConfig.sysctls`)."""
    kind: ClassVar[str] = "gcp_container_linux_node_config"
    mapping: ClassVar[Dict[str, Bender]] = {"sysctls": S("sysctls")}
    sysctls: Optional[Dict[str, str]] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodePoolLoggingConfig:
    """Node-pool logging settings; `variantConfig.variant` is flattened to a string."""
    kind: ClassVar[str] = "gcp_container_node_pool_logging_config"
    mapping: ClassVar[Dict[str, Bender]] = {"variant_config": S("variantConfig", "variant")}
    variant_config: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerReservationAffinity:
    """Compute reservation affinity for node placement (`reservationAffinity`)."""
    kind: ClassVar[str] = "gcp_container_reservation_affinity"
    mapping: ClassVar[Dict[str, Bender]] = {
        "consume_reservation_type": S("consumeReservationType"),
        "key": S("key"),
        "values": S("values", default=[]),
    }
    consume_reservation_type: Optional[str] = field(default=None)
    key: Optional[str] = field(default=None)
    values: Optional[List[str]] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodeTaint:
    """A Kubernetes node taint: effect, key and value."""
    kind: ClassVar[str] = "gcp_container_node_taint"
    mapping: ClassVar[Dict[str, Bender]] = {"effect": S("effect"), "key": S("key"), "value": S("value")}
    effect: Optional[str] = field(default=None)
    key: Optional[str] = field(default=None)
    value: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodeConfig:
    """Full node configuration (`nodeConfig`) for a cluster or node pool.

    Covers machine shape (machine type, disks, accelerators), node software
    settings (kubelet, Linux sysctls, logging, sandbox), identity (service
    account, OAuth scopes), placement (reservation affinity, spot/preemptible)
    and Kubernetes metadata (labels, taints, tags).
    """
    kind: ClassVar[str] = "gcp_container_node_config"
    # Single-key nested objects (e.g. confidentialNodes.enabled, gcfsConfig.enabled,
    # sandboxConfig.type, workloadMetadataConfig.mode) are flattened to scalars;
    # richer sub-objects are bent into their dedicated model classes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "accelerators": S("accelerators", default=[]) >> ForallBend(GcpContainerAcceleratorConfig.mapping),
        "advanced_machine_features": S("advancedMachineFeatures", "threadsPerCore"),
        "boot_disk_kms_key": S("bootDiskKmsKey"),
        "confidential_nodes": S("confidentialNodes", "enabled"),
        "disk_size_gb": S("diskSizeGb"),
        "disk_type": S("diskType"),
        "gcfs_config": S("gcfsConfig", "enabled"),
        "gvnic": S("gvnic", "enabled"),
        "image_type": S("imageType"),
        "kubelet_config": S("kubeletConfig", default={}) >> Bend(GcpContainerNodeKubeletConfig.mapping),
        "labels": S("labels"),
        "linux_node_config": S("linuxNodeConfig", default={}) >> Bend(GcpContainerLinuxNodeConfig.mapping),
        "local_ssd_count": S("localSsdCount"),
        "logging_config": S("loggingConfig", default={}) >> Bend(GcpContainerNodePoolLoggingConfig.mapping),
        "machine_type": S("machineType"),
        "metadata": S("metadata"),
        "min_cpu_platform": S("minCpuPlatform"),
        "node_group": S("nodeGroup"),
        "oauth_scopes": S("oauthScopes", default=[]),
        "preemptible": S("preemptible"),
        "reservation_affinity": S("reservationAffinity", default={}) >> Bend(GcpContainerReservationAffinity.mapping),
        "sandbox_config": S("sandboxConfig", "type"),
        "service_account": S("serviceAccount"),
        "shielded_instance_config": S("shieldedInstanceConfig", default={})
        >> Bend(GcpContainerShieldedInstanceConfig.mapping),
        "spot": S("spot"),
        "tags": S("tags", default=[]),
        "taints": S("taints", default=[]) >> ForallBend(GcpContainerNodeTaint.mapping),
        "workload_metadata_config": S("workloadMetadataConfig", "mode"),
    }
    accelerators: Optional[List[GcpContainerAcceleratorConfig]] = field(default=None)
    # advancedMachineFeatures.threadsPerCore; kept as str as serialized by the API.
    advanced_machine_features: Optional[str] = field(default=None)
    boot_disk_kms_key: Optional[str] = field(default=None)
    confidential_nodes: Optional[bool] = field(default=None)
    disk_size_gb: Optional[int] = field(default=None)
    disk_type: Optional[str] = field(default=None)
    gcfs_config: Optional[bool] = field(default=None)
    gvnic: Optional[bool] = field(default=None)
    image_type: Optional[str] = field(default=None)
    kubelet_config: Optional[GcpContainerNodeKubeletConfig] = field(default=None)
    labels: Optional[Dict[str, str]] = field(default=None)
    linux_node_config: Optional[GcpContainerLinuxNodeConfig] = field(default=None)
    local_ssd_count: Optional[int] = field(default=None)
    logging_config: Optional[GcpContainerNodePoolLoggingConfig] = field(default=None)
    machine_type: Optional[str] = field(default=None)
    metadata: Optional[Dict[str, str]] = field(default=None)
    min_cpu_platform: Optional[str] = field(default=None)
    node_group: Optional[str] = field(default=None)
    oauth_scopes: Optional[List[str]] = field(default=None)
    preemptible: Optional[bool] = field(default=None)
    reservation_affinity: Optional[GcpContainerReservationAffinity] = field(default=None)
    sandbox_config: Optional[str] = field(default=None)
    service_account: Optional[str] = field(default=None)
    shielded_instance_config: Optional[GcpContainerShieldedInstanceConfig] = field(default=None)
    spot: Optional[bool] = field(default=None)
    tags: Optional[List[str]] = field(default=None)
    taints: Optional[List[GcpContainerNodeTaint]] = field(default=None)
    workload_metadata_config: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNetworkTags:
    """A plain list of network tags (`networkTags.tags`)."""
    kind: ClassVar[str] = "gcp_container_network_tags"
    mapping: ClassVar[Dict[str, Bender]] = {"tags": S("tags", default=[])}
    tags: Optional[List[str]] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodePoolAutoConfig:
    """Auto-provisioned node-pool defaults (`nodePoolAutoConfig`), currently just network tags."""
    kind: ClassVar[str] = "gcp_container_node_pool_auto_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "network_tags": S("networkTags", default={}) >> Bend(GcpContainerNetworkTags.mapping)
    }
    network_tags: Optional[GcpContainerNetworkTags] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodeConfigDefaults:
    """Default node settings (`nodeConfigDefaults`): GCFS flag and logging config."""
    kind: ClassVar[str] = "gcp_container_node_config_defaults"
    mapping: ClassVar[Dict[str, Bender]] = {
        "gcfs_config": S("gcfsConfig", "enabled"),
        "logging_config": S("loggingConfig", default={}) >> Bend(GcpContainerNodePoolLoggingConfig.mapping),
    }
    gcfs_config: Optional[bool] = field(default=None)
    logging_config: Optional[GcpContainerNodePoolLoggingConfig] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodePoolDefaults:
    """Wrapper for cluster-wide node-pool defaults (`nodePoolDefaults.nodeConfigDefaults`)."""
    kind: ClassVar[str] = "gcp_container_node_pool_defaults"
    mapping: ClassVar[Dict[str, Bender]] = {
        "node_config_defaults": S("nodeConfigDefaults", default={}) >> Bend(GcpContainerNodeConfigDefaults.mapping)
    }
    node_config_defaults: Optional[GcpContainerNodeConfigDefaults] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodePoolAutoscaling:
    """Node-pool autoscaler settings: per-zone and total min/max node counts."""
    kind: ClassVar[str] = "gcp_container_node_pool_autoscaling"
    mapping: ClassVar[Dict[str, Bender]] = {
        "autoprovisioned": S("autoprovisioned"),
        "enabled": S("enabled"),
        "location_policy": S("locationPolicy"),
        "max_node_count": S("maxNodeCount"),
        "min_node_count": S("minNodeCount"),
        "total_max_node_count": S("totalMaxNodeCount"),
        "total_min_node_count": S("totalMinNodeCount"),
    }
    autoprovisioned: Optional[bool] = field(default=None)
    enabled: Optional[bool] = field(default=None)
    location_policy: Optional[str] = field(default=None)
    max_node_count: Optional[int] = field(default=None)
    min_node_count: Optional[int] = field(default=None)
    total_max_node_count: Optional[int] = field(default=None)
    total_min_node_count: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodeNetworkConfig:
    """Per-node-pool networking (`networkConfig`): pod range/CIDR and egress tier."""
    kind: ClassVar[str] = "gcp_container_node_network_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "create_pod_range": S("createPodRange"),
        # networkPerformanceConfig.totalEgressBandwidthTier flattened to a string.
        "network_performance_config": S("networkPerformanceConfig", "totalEgressBandwidthTier"),
        "pod_ipv4_cidr_block": S("podIpv4CidrBlock"),
        "pod_range": S("podRange"),
    }
    create_pod_range: Optional[bool] = field(default=None)
    network_performance_config: Optional[str] = field(default=None)
    pod_ipv4_cidr_block: Optional[str] = field(default=None)
    pod_range: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerBlueGreenInfo:
    """State of a blue/green node-pool upgrade: both pools' instance groups and the current phase."""
    kind: ClassVar[str] = "gcp_container_blue_green_info"
    mapping: ClassVar[Dict[str, Bender]] = {
        "blue_instance_group_urls": S("blueInstanceGroupUrls", default=[]),
        "blue_pool_deletion_start_time": S("bluePoolDeletionStartTime"),
        "green_instance_group_urls": S("greenInstanceGroupUrls", default=[]),
        "green_pool_version": S("greenPoolVersion"),
        "phase": S("phase"),
    }
    blue_instance_group_urls: Optional[List[str]] = field(default=None)
    blue_pool_deletion_start_time: Optional[datetime] = field(default=None)
    green_instance_group_urls: Optional[List[str]] = field(default=None)
    green_pool_version: Optional[str] = field(default=None)
    phase: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerUpdateInfo:
    """Wrapper for node-pool update progress (`updateInfo.blueGreenInfo`)."""
    kind: ClassVar[str] = "gcp_container_update_info"
    mapping: ClassVar[Dict[str, Bender]] = {
        "blue_green_info": S("blueGreenInfo", default={}) >> Bend(GcpContainerBlueGreenInfo.mapping)
    }
    blue_green_info: Optional[GcpContainerBlueGreenInfo] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNodePool:
    """A node pool embedded in a cluster response: config, autoscaling, status and upgrade info."""
    kind: ClassVar[str] = "gcp_container_node_pool"
    # maxPodsConstraint.maxPodsPerNode and other single-key nested objects are
    # flattened; structured sub-objects are bent into their model classes.
    mapping: ClassVar[Dict[str, Bender]] = {
        "autoscaling": S("autoscaling", default={}) >> Bend(GcpContainerNodePoolAutoscaling.mapping),
        "conditions": S("conditions", default=[]) >> ForallBend(GcpContainerStatusCondition.mapping),
        "config": S("config", default={}) >> Bend(GcpContainerNodeConfig.mapping),
        "initial_node_count": S("initialNodeCount"),
        "instance_group_urls": S("instanceGroupUrls", default=[]),
        "locations": S("locations", default=[]),
        "management": S("management", default={}) >> Bend(GcpContainerNodeManagement.mapping),
        "max_pods_constraint": S("maxPodsConstraint", "maxPodsPerNode"),
        "name": S("name"),
        "network_config": S("networkConfig", default={}) >> Bend(GcpContainerNodeNetworkConfig.mapping),
        "pod_ipv4_cidr_size": S("podIpv4CidrSize"),
        "self_link": S("selfLink"),
        "status": S("status"),
        "status_message": S("statusMessage"),
        "update_info": S("updateInfo", default={}) >> Bend(GcpContainerUpdateInfo.mapping),
        "upgrade_settings": S("upgradeSettings", default={}) >> Bend(GcpContainerUpgradeSettings.mapping),
        "version": S("version"),
    }
    autoscaling: Optional[GcpContainerNodePoolAutoscaling] = field(default=None)
    conditions: Optional[List[GcpContainerStatusCondition]] = field(default=None)
    config: Optional[GcpContainerNodeConfig] = field(default=None)
    initial_node_count: Optional[int] = field(default=None)
    instance_group_urls: Optional[List[str]] = field(default=None)
    locations: Optional[List[str]] = field(default=None)
    management: Optional[GcpContainerNodeManagement] = field(default=None)
    # maxPodsConstraint.maxPodsPerNode; kept as str as serialized by the API.
    max_pods_constraint: Optional[str] = field(default=None)
    name: Optional[str] = field(default=None)
    network_config: Optional[GcpContainerNodeNetworkConfig] = field(default=None)
    pod_ipv4_cidr_size: Optional[int] = field(default=None)
    self_link: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    status_message: Optional[str] = field(default=None)
    update_info: Optional[GcpContainerUpdateInfo] = field(default=None)
    upgrade_settings: Optional[GcpContainerUpgradeSettings] = field(default=None)
    version: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerFilter:
    """Notification filter: the list of event types to deliver (`filter.eventType`)."""
    kind: ClassVar[str] = "gcp_container_filter"
    mapping: ClassVar[Dict[str, Bender]] = {"event_type": S("eventType", default=[])}
    event_type: Optional[List[str]] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerPubSub:
    """Pub/Sub notification target for cluster events: enabled flag, filter and topic."""
    kind: ClassVar[str] = "gcp_container_pub_sub"
    mapping: ClassVar[Dict[str, Bender]] = {
        "enabled": S("enabled"),
        "filter": S("filter", default={}) >> Bend(GcpContainerFilter.mapping),
        "topic": S("topic"),
    }
    enabled: Optional[bool] = field(default=None)
    filter: Optional[GcpContainerFilter] = field(default=None)
    topic: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerNotificationConfig:
    """Wrapper for cluster notification settings (`notificationConfig.pubsub`)."""
    kind: ClassVar[str] = "gcp_container_notification_config"
    mapping: ClassVar[Dict[str, Bender]] = {"pubsub": S("pubsub", default={}) >> Bend(GcpContainerPubSub.mapping)}
    pubsub: Optional[GcpContainerPubSub] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerPrivateClusterConfig:
    """Private-cluster settings: private endpoint/nodes flags, master CIDR, peering and endpoints."""
    kind: ClassVar[str] = "gcp_container_private_cluster_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "enable_private_endpoint": S("enablePrivateEndpoint"),
        "enable_private_nodes": S("enablePrivateNodes"),
        # masterGlobalAccessConfig.enabled flattened to a bool.
        "master_global_access_config": S("masterGlobalAccessConfig", "enabled"),
        "master_ipv4_cidr_block": S("masterIpv4CidrBlock"),
        "peering_name": S("peeringName"),
        "private_endpoint": S("privateEndpoint"),
        "public_endpoint": S("publicEndpoint"),
    }
    enable_private_endpoint: Optional[bool] = field(default=None)
    enable_private_nodes: Optional[bool] = field(default=None)
    master_global_access_config: Optional[bool] = field(default=None)
    master_ipv4_cidr_block: Optional[str] = field(default=None)
    peering_name: Optional[str] = field(default=None)
    private_endpoint: Optional[str] = field(default=None)
    public_endpoint: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerResourceUsageExportConfig:
    """Resource-usage export settings: BigQuery dataset, consumption metering, egress metering."""
    kind: ClassVar[str] = "gcp_container_resource_usage_export_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        # bigqueryDestination.datasetId flattened to a string.
        "bigquery_destination": S("bigqueryDestination", "datasetId"),
        "consumption_metering_config": S("consumptionMeteringConfig", "enabled"),
        "enable_network_egress_metering": S("enableNetworkEgressMetering"),
    }
    bigquery_destination: Optional[str] = field(default=None)
    consumption_metering_config: Optional[bool] = field(default=None)
    enable_network_egress_metering: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerCluster(GcpResource):
    """A GKE cluster, collected via the Container v1 API.

    Clusters are listed across all locations of a project in one call
    (the `locations/-` wildcard in the request parent). The mapping bends
    the raw API JSON into this model, flattening single-key nested objects
    (e.g. `autopilot.enabled`, `releaseChannel.channel`) to scalars and
    delegating structured sub-objects to the dedicated model classes above.
    """
    kind: ClassVar[str] = "gcp_container_cluster"
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="container",
        version="v1",
        accessors=["projects", "locations", "clusters"],
        action="list",
        # "-" wildcard lists clusters in all locations of the project at once.
        request_parameter={"parent": "projects/{project}/locations/-"},
        request_parameter_in={"project"},
        response_path="clusters",
        response_regional_sub_path=None,
        required_iam_permissions=["container.clusters.list"],
        mutate_iam_permissions=["container.clusters.update", "container.clusters.delete"],
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        # Standard resource prelude: id falls back from name to id to selfLink.
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "tags": S("labels", default={}),
        "name": S("name"),
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "addons_config": S("addonsConfig", default={}) >> Bend(GcpContainerAddonsConfig.mapping),
        "authenticator_groups_config": S("authenticatorGroupsConfig", default={})
        >> Bend(GcpContainerAuthenticatorGroupsConfig.mapping),
        "autopilot": S("autopilot", "enabled"),
        "autoscaling": S("autoscaling", default={}) >> Bend(GcpContainerClusterAutoscaling.mapping),
        "binary_authorization": S("binaryAuthorization", default={}) >> Bend(GcpContainerBinaryAuthorization.mapping),
        "cluster_ipv4_cidr": S("clusterIpv4Cidr"),
        "conditions": S("conditions", default=[]) >> ForallBend(GcpContainerStatusCondition.mapping),
        "confidential_nodes": S("confidentialNodes", "enabled"),
        "cost_management_config": S("costManagementConfig", "enabled"),
        "create_time": S("createTime"),
        "current_master_version": S("currentMasterVersion"),
        "current_node_count": S("currentNodeCount"),
        "current_node_version": S("currentNodeVersion"),
        "database_encryption": S("databaseEncryption", default={}) >> Bend(GcpContainerDatabaseEncryption.mapping),
        "default_max_pods_constraint": S("defaultMaxPodsConstraint", "maxPodsPerNode"),
        "enable_kubernetes_alpha": S("enableKubernetesAlpha"),
        "enable_tpu": S("enableTpu"),
        "endpoint": S("endpoint"),
        "etag": S("etag"),
        "expire_time": S("expireTime"),
        "identity_service_config": S("identityServiceConfig", "enabled"),
        "initial_cluster_version": S("initialClusterVersion"),
        "initial_node_count": S("initialNodeCount"),
        "instance_group_urls": S("instanceGroupUrls", default=[]),
        "ip_allocation_policy": S("ipAllocationPolicy", default={}) >> Bend(GcpContainerIPAllocationPolicy.mapping),
        "legacy_abac": S("legacyAbac", "enabled"),
        "location": S("location"),
        "locations": S("locations", default=[]),
        "logging_config": S("loggingConfig", default={}) >> Bend(GcpContainerLoggingConfig.mapping),
        "logging_service": S("loggingService"),
        "container_cluster_maintenance_policy": S("maintenancePolicy", default={})
        >> Bend(GcpContainerMaintenancePolicy.mapping),
        "master_auth": S("masterAuth", default={}) >> Bend(GcpContainerMasterAuth.mapping),
        "master_authorized_networks_config": S("masterAuthorizedNetworksConfig", default={})
        >> Bend(GcpContainerMasterAuthorizedNetworksConfig.mapping),
        "mesh_certificates": S("meshCertificates", "enableCertificates"),
        "monitoring_config": S("monitoringConfig", default={}) >> Bend(GcpContainerMonitoringConfig.mapping),
        "monitoring_service": S("monitoringService"),
        "network": S("network"),
        "network_config": S("networkConfig", default={}) >> Bend(GcpContainerNetworkConfig.mapping),
        "network_policy": S("networkPolicy", default={}) >> Bend(GcpContainerNetworkPolicy.mapping),
        "node_config": S("nodeConfig", default={}) >> Bend(GcpContainerNodeConfig.mapping),
        "node_ipv4_cidr_size": S("nodeIpv4CidrSize"),
        "node_pool_auto_config": S("nodePoolAutoConfig", default={}) >> Bend(GcpContainerNodePoolAutoConfig.mapping),
        "node_pool_defaults": S("nodePoolDefaults", default={}) >> Bend(GcpContainerNodePoolDefaults.mapping),
        "node_pools": S("nodePools", default=[]) >> ForallBend(GcpContainerNodePool.mapping),
        "notification_config": S("notificationConfig", default={}) >> Bend(GcpContainerNotificationConfig.mapping),
        "private_cluster_config": S("privateClusterConfig", default={})
        >> Bend(GcpContainerPrivateClusterConfig.mapping),
        "release_channel": S("releaseChannel", "channel"),
        "resource_labels": S("resourceLabels"),
        "resource_usage_export_config": S("resourceUsageExportConfig", default={})
        >> Bend(GcpContainerResourceUsageExportConfig.mapping),
        "services_ipv4_cidr": S("servicesIpv4Cidr"),
        "shielded_nodes": S("shieldedNodes", "enabled"),
        "status": S("status"),
        "status_message": S("statusMessage"),
        "subnetwork": S("subnetwork"),
        "tpu_ipv4_cidr_block": S("tpuIpv4CidrBlock"),
        "vertical_pod_autoscaling": S("verticalPodAutoscaling", "enabled"),
        "workload_identity_config": S("workloadIdentityConfig", "workloadPool"),
    }
    addons_config: Optional[GcpContainerAddonsConfig] = field(default=None)
    authenticator_groups_config: Optional[GcpContainerAuthenticatorGroupsConfig] = field(default=None)
    autopilot: Optional[bool] = field(default=None)
    autoscaling: Optional[GcpContainerClusterAutoscaling] = field(default=None)
    binary_authorization: Optional[GcpContainerBinaryAuthorization] = field(default=None)
    cluster_ipv4_cidr: Optional[str] = field(default=None)
    conditions: Optional[List[GcpContainerStatusCondition]] = field(default=None)
    confidential_nodes: Optional[bool] = field(default=None)
    cost_management_config: Optional[bool] = field(default=None)
    create_time: Optional[datetime] = field(default=None)
    current_master_version: Optional[str] = field(default=None)
    current_node_count: Optional[int] = field(default=None)
    current_node_version: Optional[str] = field(default=None)
    database_encryption: Optional[GcpContainerDatabaseEncryption] = field(default=None)
    # defaultMaxPodsConstraint.maxPodsPerNode; kept as str as serialized by the API.
    default_max_pods_constraint: Optional[str] = field(default=None)
    enable_kubernetes_alpha: Optional[bool] = field(default=None)
    enable_tpu: Optional[bool] = field(default=None)
    endpoint: Optional[str] = field(default=None)
    etag: Optional[str] = field(default=None)
    expire_time: Optional[datetime] = field(default=None)
    identity_service_config: Optional[bool] = field(default=None)
    initial_cluster_version: Optional[str] = field(default=None)
    initial_node_count: Optional[int] = field(default=None)
    instance_group_urls: Optional[List[str]] = field(default=None)
    ip_allocation_policy: Optional[GcpContainerIPAllocationPolicy] = field(default=None)
    legacy_abac: Optional[bool] = field(default=None)
    location: Optional[str] = field(default=None)
    locations: Optional[List[str]] = field(default=None)
    logging_config: Optional[GcpContainerLoggingConfig] = field(default=None)
    logging_service: Optional[str] = field(default=None)
    container_cluster_maintenance_policy: Optional[GcpContainerMaintenancePolicy] = field(default=None)
    master_auth: Optional[GcpContainerMasterAuth] = field(default=None)
    master_authorized_networks_config: Optional[GcpContainerMasterAuthorizedNetworksConfig] = field(default=None)
    mesh_certificates: Optional[bool] = field(default=None)
    monitoring_config: Optional[GcpContainerMonitoringConfig] = field(default=None)
    monitoring_service: Optional[str] = field(default=None)
    network: Optional[str] = field(default=None)
    network_config: Optional[GcpContainerNetworkConfig] = field(default=None)
    network_policy: Optional[GcpContainerNetworkPolicy] = field(default=None)
    node_config: Optional[GcpContainerNodeConfig] = field(default=None)
    node_ipv4_cidr_size: Optional[int] = field(default=None)
    node_pool_auto_config: Optional[GcpContainerNodePoolAutoConfig] = field(default=None)
    node_pool_defaults: Optional[GcpContainerNodePoolDefaults] = field(default=None)
    node_pools: Optional[List[GcpContainerNodePool]] = field(default=None)
    notification_config: Optional[GcpContainerNotificationConfig] = field(default=None)
    private_cluster_config: Optional[GcpContainerPrivateClusterConfig] = field(default=None)
    release_channel: Optional[str] = field(default=None)
    resource_labels: Optional[Dict[str, str]] = field(default=None)
    resource_usage_export_config: Optional[GcpContainerResourceUsageExportConfig] = field(default=None)
    services_ipv4_cidr: Optional[str] = field(default=None)
    shielded_nodes: Optional[bool] = field(default=None)
    status: Optional[str] = field(default=None)
    status_message: Optional[str] = field(default=None)
    subnetwork: Optional[str] = field(default=None)
    tpu_ipv4_cidr_block: Optional[str] = field(default=None)
    vertical_pod_autoscaling: Optional[bool] = field(default=None)
    workload_identity_config: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerStatus:
    """A google.rpc.Status-shaped error object: code, message and detail payloads."""
    kind: ClassVar[str] = "gcp_container_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "code": S("code"),
        "details": S("details", default=[]),
        "message": S("message"),
    }
    code: Optional[int] = field(default=None)
    # Details are kept as raw JSON objects; their schema varies by error type.
    details: Optional[List[Json]] = field(default=None)
    message: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerMetric:
    """A single progress metric with exactly one of double/int/string value populated."""
    kind: ClassVar[str] = "gcp_container_metric"
    mapping: ClassVar[Dict[str, Bender]] = {
        "double_value": S("doubleValue"),
        "int_value": S("intValue"),
        "name": S("name"),
        "string_value": S("stringValue"),
    }
    double_value: Optional[float] = field(default=None)
    # Kept as str: the API serializes this int64 value as a string.
    int_value: Optional[str] = field(default=None)
    name: Optional[str] = field(default=None)
    string_value: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerOperationProgress:
    """Progress information of a container operation: metric list, name and status."""
    kind: ClassVar[str] = "gcp_container_operation_progress"
    mapping: ClassVar[Dict[str, Bender]] = {
        "metrics": S("metrics", default=[]) >> ForallBend(GcpContainerMetric.mapping),
        "name": S("name"),
        "status": S("status"),
    }
    metrics: Optional[List[GcpContainerMetric]] = field(default=None)
    name: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpContainerOperation(GcpResource):
    """A long-running container (GKE) operation, collected via the Container v1 API.

    Operations are listed across all locations of a project in one call and
    are linked back to the cluster they target (the cluster is declared as a
    predecessor in ``reference_kinds``; see :meth:`connect_in_graph`).
    """
    kind: ClassVar[str] = "gcp_container_operation"
    reference_kinds: ClassVar[ModelReference] = {"predecessors": {"default": ["gcp_container_cluster"]}}
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="container",
        version="v1",
        accessors=["projects", "locations", "operations"],
        action="list",
        # "-" wildcard lists operations in all locations of the project at once.
        request_parameter={"parent": "projects/{project}/locations/-"},
        request_parameter_in={"project"},
        response_path="operations",
        response_regional_sub_path=None,
        required_iam_permissions=["container.operations.list"],
        mutate_iam_permissions=[],
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        # Standard resource prelude: id falls back from name to id to selfLink.
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "tags": S("labels", default={}),
        "name": S("name"),
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "cluster_conditions": S("clusterConditions", default=[]) >> ForallBend(GcpContainerStatusCondition.mapping),
        "detail": S("detail"),
        "end_time": S("endTime"),
        "container_operation_error": S("error", default={}) >> Bend(GcpContainerStatus.mapping),
        "location": S("location"),
        "nodepool_conditions": S("nodepoolConditions", default=[]) >> ForallBend(GcpContainerStatusCondition.mapping),
        "operation_type": S("operationType"),
        "container_operation_progress": S("progress", default={}) >> Bend(GcpContainerOperationProgress.mapping),
        "start_time": S("startTime"),
        "status": S("status"),
        "status_message": S("statusMessage"),
        "target_link": S("targetLink"),
    }
    cluster_conditions: Optional[List[GcpContainerStatusCondition]] = field(default=None)
    detail: Optional[str] = field(default=None)
    end_time: Optional[datetime] = field(default=None)
    container_operation_error: Optional[GcpContainerStatus] = field(default=None)
    location: Optional[str] = field(default=None)
    nodepool_conditions: Optional[List[GcpContainerStatusCondition]] = field(default=None)
    operation_type: Optional[str] = field(default=None)
    container_operation_progress: Optional[GcpContainerOperationProgress] = field(default=None)
    start_time: Optional[datetime] = field(default=None)
    status: Optional[str] = field(default=None)
    status_message: Optional[str] = field(default=None)
    target_link: Optional[str] = field(default=None)
    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect this operation to the cluster referenced by ``target_link``.

        ``reverse=True`` makes the matched cluster the edge source, so the
        cluster becomes a predecessor of this operation in the graph.
        """
        if self.target_link:
            builder.add_edge(self, reverse=True, clazz=GcpContainerCluster, link=self.target_link)
# Resource kinds exported by this module for the collector to register.
# NOTE(review): the original line was fused with dataset-extraction metadata
# ("| /resoto-plugin-gcp-3.6.5.tar.gz/... | container.py | pypi"), which is a
# syntax error; the assignment below restores the clean statement.
resources = [GcpContainerCluster, GcpContainerOperation]
from datetime import datetime
from typing import ClassVar, Dict, Optional, List
from attr import define, field
from resoto_plugin_gcp.gcp_client import GcpApiSpec
from resoto_plugin_gcp.resources.base import GcpResource, GcpDeprecationStatus, get_client
from resotolib.graph import Graph
from resotolib.json_bender import Bender, S, Bend, ForallBend
@define(eq=False, slots=False)
class GcpProjectteam:
    """The project team associated with an ACL entity (`projectTeam`): project number and team name."""
    kind: ClassVar[str] = "gcp_projectteam"
    mapping: ClassVar[Dict[str, Bender]] = {"project_number": S("projectNumber"), "team": S("team")}
    project_number: Optional[str] = field(default=None)
    team: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpBucketAccessControl:
    """A bucket ACL entry: who (entity/email/domain) holds which role on a bucket.

    The mapping includes the standard resource prelude (id/tags/name/...)
    in addition to the ACL-specific fields.
    """
    kind: ClassVar[str] = "gcp_bucket_access_control"
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "tags": S("labels", default={}),
        "name": S("name"),
        "ctime": S("creationTimestamp"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "bucket": S("bucket"),
        "domain": S("domain"),
        "email": S("email"),
        "entity": S("entity"),
        "entity_id": S("entityId"),
        "etag": S("etag"),
        "project_team": S("projectTeam", default={}) >> Bend(GcpProjectteam.mapping),
        "role": S("role"),
    }
    bucket: Optional[str] = field(default=None)
    domain: Optional[str] = field(default=None)
    email: Optional[str] = field(default=None)
    entity: Optional[str] = field(default=None)
    entity_id: Optional[str] = field(default=None)
    etag: Optional[str] = field(default=None)
    project_team: Optional[GcpProjectteam] = field(default=None)
    role: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpAutoclass:
    """Bucket Autoclass state: enabled flag and the time it was last toggled."""
    kind: ClassVar[str] = "gcp_autoclass"
    mapping: ClassVar[Dict[str, Bender]] = {"enabled": S("enabled"), "toggle_time": S("toggleTime")}
    enabled: Optional[bool] = field(default=None)
    toggle_time: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class GcpCors:
    """One CORS rule on a bucket: allowed methods/origins/headers and cache max-age."""
    kind: ClassVar[str] = "gcp_cors"
    mapping: ClassVar[Dict[str, Bender]] = {
        "max_age_seconds": S("maxAgeSeconds"),
        "method": S("method", default=[]),
        "origin": S("origin", default=[]),
        "response_header": S("responseHeader", default=[]),
    }
    max_age_seconds: Optional[int] = field(default=None)
    method: Optional[List[str]] = field(default=None)
    origin: Optional[List[str]] = field(default=None)
    response_header: Optional[List[str]] = field(default=None)
@define(eq=False, slots=False)
class GcpObjectAccessControl:
    """One entry of a GCS object ACL: grants an entity a role on a single object in a bucket."""

    kind: ClassVar[str] = "gcp_object_access_control"
    mapping: ClassVar[Dict[str, Bender]] = {
        "bucket": S("bucket"),
        "domain": S("domain"),
        "email": S("email"),
        "entity": S("entity"),
        "entity_id": S("entityId"),
        "etag": S("etag"),
        "generation": S("generation"),
        "id": S("id"),
        "object": S("object"),
        "project_team": S("projectTeam", default={}) >> Bend(GcpProjectteam.mapping),
        "role": S("role"),
        "self_link": S("selfLink"),
    }
    bucket: Optional[str] = field(default=None)  # bucket containing the object
    domain: Optional[str] = field(default=None)
    email: Optional[str] = field(default=None)
    entity: Optional[str] = field(default=None)
    entity_id: Optional[str] = field(default=None)
    etag: Optional[str] = field(default=None)
    generation: Optional[str] = field(default=None)  # object generation this ACL applies to
    # NOTE: "id" and "object" intentionally mirror the API field names and shadow
    # the builtins of the same name on this attrs model.
    id: Optional[str] = field(default=None)
    object: Optional[str] = field(default=None)  # name of the object
    project_team: Optional[GcpProjectteam] = field(default=None)
    role: Optional[str] = field(default=None)
    self_link: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpBucketpolicyonly:
    """Legacy "bucket policy only" IAM setting (predecessor of uniform bucket-level access)."""

    kind: ClassVar[str] = "gcp_bucketpolicyonly"
    mapping: ClassVar[Dict[str, Bender]] = {"enabled": S("enabled"), "locked_time": S("lockedTime")}
    enabled: Optional[bool] = field(default=None)
    locked_time: Optional[datetime] = field(default=None)  # deadline after which the setting is locked
@define(eq=False, slots=False)
class GcpUniformbucketlevelaccess:
    """Uniform bucket-level access setting: when enabled, object ACLs are disabled in favor of IAM."""

    kind: ClassVar[str] = "gcp_uniformbucketlevelaccess"
    mapping: ClassVar[Dict[str, Bender]] = {"enabled": S("enabled"), "locked_time": S("lockedTime")}
    enabled: Optional[bool] = field(default=None)
    locked_time: Optional[datetime] = field(default=None)  # deadline after which the setting is locked
@define(eq=False, slots=False)
class GcpIamconfiguration:
    """IAM-related configuration of a bucket (access-model and public-access settings)."""

    kind: ClassVar[str] = "gcp_iamconfiguration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "bucket_policy_only": S("bucketPolicyOnly", default={}) >> Bend(GcpBucketpolicyonly.mapping),
        "public_access_prevention": S("publicAccessPrevention"),
        "uniform_bucket_level_access": S("uniformBucketLevelAccess", default={})
        >> Bend(GcpUniformbucketlevelaccess.mapping),
    }
    bucket_policy_only: Optional[GcpBucketpolicyonly] = field(default=None)  # legacy alias of uniform access
    public_access_prevention: Optional[str] = field(default=None)
    uniform_bucket_level_access: Optional[GcpUniformbucketlevelaccess] = field(default=None)
@define(eq=False, slots=False)
class GcpAction:
    """Action part of a lifecycle rule: what to do when the rule's condition matches."""

    kind: ClassVar[str] = "gcp_action"
    mapping: ClassVar[Dict[str, Bender]] = {"storage_class": S("storageClass"), "type": S("type")}
    storage_class: Optional[str] = field(default=None)  # target class when the action changes storage class
    type: Optional[str] = field(default=None)  # action type as delivered by the API
@define(eq=False, slots=False)
class GcpCondition:
    """Condition part of a bucket lifecycle rule.

    An object must satisfy every field that is set here for the rule's action
    to be applied.
    """

    kind: ClassVar[str] = "gcp_condition"
    mapping: ClassVar[Dict[str, Bender]] = {
        "age": S("age"),
        "created_before": S("createdBefore"),
        "custom_time_before": S("customTimeBefore"),
        "days_since_custom_time": S("daysSinceCustomTime"),
        "days_since_noncurrent_time": S("daysSinceNoncurrentTime"),
        "is_live": S("isLive"),
        "matches_pattern": S("matchesPattern"),
        "matches_prefix": S("matchesPrefix", default=[]),
        "matches_storage_class": S("matchesStorageClass", default=[]),
        "matches_suffix": S("matchesSuffix", default=[]),
        "noncurrent_time_before": S("noncurrentTimeBefore"),
        "num_newer_versions": S("numNewerVersions"),
    }
    age: Optional[int] = field(default=None)  # object age in days
    created_before: Optional[str] = field(default=None)  # date string as delivered by the API
    custom_time_before: Optional[str] = field(default=None)  # date string as delivered by the API
    # Fixed: the GCS lifecycle API delivers these two as integers (a number of
    # days), not as timestamps. They were previously declared Optional[datetime],
    # which the original inline comments already flagged as wrong.
    days_since_custom_time: Optional[int] = field(default=None)
    days_since_noncurrent_time: Optional[int] = field(default=None)
    is_live: Optional[bool] = field(default=None)
    matches_pattern: Optional[str] = field(default=None)
    matches_prefix: Optional[List[str]] = field(default=None)
    matches_storage_class: Optional[List[str]] = field(default=None)
    matches_suffix: Optional[List[str]] = field(default=None)
    noncurrent_time_before: Optional[str] = field(default=None)  # date string as delivered by the API
    num_newer_versions: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class GcpRule:
    """A single lifecycle rule: an action plus the condition under which it fires."""

    kind: ClassVar[str] = "gcp_rule"
    mapping: ClassVar[Dict[str, Bender]] = {
        "action": S("action", default={}) >> Bend(GcpAction.mapping),
        "condition": S("condition", default={}) >> Bend(GcpCondition.mapping),
    }
    action: Optional[GcpAction] = field(default=None)
    condition: Optional[GcpCondition] = field(default=None)
@define(eq=False, slots=False)
class GcpLogging:
    """Access-log configuration of a bucket: where its usage logs are written."""

    kind: ClassVar[str] = "gcp_logging"
    mapping: ClassVar[Dict[str, Bender]] = {"log_bucket": S("logBucket"), "log_object_prefix": S("logObjectPrefix")}
    log_bucket: Optional[str] = field(default=None)  # destination bucket for the logs
    log_object_prefix: Optional[str] = field(default=None)  # prefix applied to log object names
@define(eq=False, slots=False)
class GcpOwner:
    """Owner entity of a bucket as reported by the API."""

    kind: ClassVar[str] = "gcp_owner"
    mapping: ClassVar[Dict[str, Bender]] = {"entity": S("entity"), "entity_id": S("entityId")}
    entity: Optional[str] = field(default=None)
    entity_id: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpRetentionpolicy:
    """Retention policy of a bucket: minimum time objects must be kept before deletion."""

    kind: ClassVar[str] = "gcp_retentionpolicy"
    mapping: ClassVar[Dict[str, Bender]] = {
        "effective_time": S("effectiveTime"),
        "is_locked": S("isLocked"),
        "retention_period": S("retentionPeriod"),
    }
    effective_time: Optional[datetime] = field(default=None)
    is_locked: Optional[bool] = field(default=None)  # a locked policy can no longer be reduced/removed
    retention_period: Optional[str] = field(default=None)  # duration as delivered by the API
@define(eq=False, slots=False)
class GcpWebsite:
    """Static-website configuration of a bucket."""

    kind: ClassVar[str] = "gcp_website"
    mapping: ClassVar[Dict[str, Bender]] = {
        "main_page_suffix": S("mainPageSuffix"),
        "not_found_page": S("notFoundPage"),
    }
    main_page_suffix: Optional[str] = field(default=None)  # served for directory-style requests
    not_found_page: Optional[str] = field(default=None)  # served on 404
@define(eq=False, slots=False)
class GcpObject(GcpResource):
    # GcpObjects are necessary to empty buckets before deletion
    # they are not intended to be collected and stored in the graph
    """Minimal model of an object inside a bucket.

    Only used by GcpBucket.pre_delete to list and delete the bucket's contents;
    never collected into the resource graph.
    """

    kind: ClassVar[str] = "gcp_object"
    # storage.objects.list for a single bucket; the {bucket} parameter is
    # substituted by the client at call time.
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="storage",
        version="v1",
        accessors=["objects"],
        action="list",
        request_parameter={"bucket": "{bucket}"},
        request_parameter_in={"bucket"},
        response_path="items",
        response_regional_sub_path=None,
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "name": S("name"),
    }
@define(eq=False, slots=False)
class GcpBucket(GcpResource):
    """A Google Cloud Storage bucket.

    Collected via the storage v1 buckets.list API. Because GCS refuses to
    delete non-empty buckets, pre_delete first removes all objects.
    """

    kind: ClassVar[str] = "gcp_bucket"
    api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
        service="storage",
        version="v1",
        accessors=["buckets"],
        action="list",
        request_parameter={"project": "{project}"},
        request_parameter_in={"project"},
        # single_request_parameter={"project": "{project}"},
        # single_request_parameter_in={"project"},
        response_path="items",
        response_regional_sub_path=None,
        mutate_iam_permissions=["storage.buckets.update", "storage.buckets.delete"],
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name").or_else(S("id")).or_else(S("selfLink")),
        "tags": S("labels", default={}),
        "name": S("name"),
        "ctime": S("creationTimestamp"),
        "mtime": S("updated"),
        "description": S("description"),
        "link": S("selfLink"),
        "label_fingerprint": S("labelFingerprint"),
        "deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
        "acl": S("acl", default=[]) >> ForallBend(GcpBucketAccessControl.mapping),
        "autoclass": S("autoclass", default={}) >> Bend(GcpAutoclass.mapping),
        "requester_pays": S("billing", "requesterPays"),
        "cors": S("cors", default=[]) >> ForallBend(GcpCors.mapping),
        # Fixed: the mapping key must match the attribute declared below, and the
        # API delivers this list as customPlacementConfig.dataLocations (camelCase).
        # The previous entry ("custom_placement_config" <- "data_locations") never
        # populated the field.
        "custom_placement_config_data_locations": S("customPlacementConfig", "dataLocations", default=[]),
        "default_event_based_hold": S("defaultEventBasedHold"),
        "default_object_acl": S("defaultObjectAcl", default=[]) >> ForallBend(GcpObjectAccessControl.mapping),
        "encryption_default_kms_key_name": S("encryption", "defaultKmsKeyName"),
        "etag": S("etag"),
        "iam_configuration": S("iamConfiguration", default={}) >> Bend(GcpIamconfiguration.mapping),
        "lifecycle_rule": S("lifecycle", "rule", default=[]) >> ForallBend(GcpRule.mapping),
        "location": S("location"),
        "location_type": S("locationType"),
        "logging": S("logging", default={}) >> Bend(GcpLogging.mapping),
        "metageneration": S("metageneration"),
        "bucket_owner": S("owner", default={}) >> Bend(GcpOwner.mapping),
        "project_number": S("projectNumber"),
        "retention_policy": S("retentionPolicy", default={}) >> Bend(GcpRetentionpolicy.mapping),
        "rpo": S("rpo"),
        "satisfies_pzs": S("satisfiesPZS"),
        "storage_class": S("storageClass"),
        "time_created": S("timeCreated"),
        "updated": S("updated"),
        "versioning_enabled": S("versioning", "enabled"),
        "bucket_website": S("website", default={}) >> Bend(GcpWebsite.mapping),
    }
    acl: Optional[List[GcpBucketAccessControl]] = field(default=None)
    autoclass: Optional[GcpAutoclass] = field(default=None)
    cors: Optional[List[GcpCors]] = field(default=None)
    custom_placement_config_data_locations: Optional[List[str]] = field(default=None)
    default_event_based_hold: Optional[bool] = field(default=None)
    default_object_acl: Optional[List[GcpObjectAccessControl]] = field(default=None)
    encryption_default_kms_key_name: Optional[str] = field(default=None)
    etag: Optional[str] = field(default=None)
    iam_configuration: Optional[GcpIamconfiguration] = field(default=None)
    location: Optional[str] = field(default=None)
    location_type: Optional[str] = field(default=None)
    logging: Optional[GcpLogging] = field(default=None)
    metageneration: Optional[str] = field(default=None)
    bucket_owner: Optional[GcpOwner] = field(default=None)
    project_number: Optional[str] = field(default=None)
    retention_policy: Optional[GcpRetentionpolicy] = field(default=None)
    rpo: Optional[str] = field(default=None)
    satisfies_pzs: Optional[bool] = field(default=None)
    storage_class: Optional[str] = field(default=None)
    time_created: Optional[datetime] = field(default=None)
    updated: Optional[datetime] = field(default=None)
    bucket_website: Optional[GcpWebsite] = field(default=None)
    requester_pays: Optional[bool] = field(default=None)
    versioning_enabled: Optional[bool] = field(default=None)
    lifecycle_rule: List[GcpRule] = field(factory=list)

    def pre_delete(self, graph: Graph) -> bool:
        """Empty the bucket by deleting every object in it.

        Required because GCS rejects deletion of non-empty buckets.
        """
        client = get_client(self)
        objects = client.list(GcpObject.api_spec, bucket=self.name)
        for obj in objects:
            object_in_bucket = GcpObject.from_api(obj)
            client.delete(
                object_in_bucket.api_spec.for_delete(),
                bucket=self.name,
                resource=object_in_bucket.name,
            )
        return True

    def delete(self, graph: Graph) -> bool:
        """Delete the (already emptied) bucket itself."""
        client = get_client(self)
        api_spec = self.api_spec.for_delete()
        # buckets.delete addresses the bucket directly, not via the project parameter
        api_spec.request_parameter = {"bucket": "{bucket}"}
        client.delete(
            api_spec,
            bucket=self.name,
        )
        return True

    def update_tag(self, key: str, value: Optional[str]) -> bool:
        """Set (or, if value is None, remove) a label on the bucket via buckets.patch."""
        client = get_client(self)
        labels = dict(self.tags)
        if value is None:
            if key in labels:
                del labels[key]
            else:
                # nothing to remove -> report failure without an API call
                return False
        else:
            labels.update({key: value})
        api_spec = self.api_spec.for_set_labels()
        api_spec.action = "patch"
        api_spec.request_parameter = {"bucket": "{bucket}"}
        client.set_labels(
            api_spec,
            body={"labels": labels},
            bucket=self.name,
        )
        return True

    def delete_tag(self, key: str) -> bool:
        """Remove a label from the bucket (implemented as update_tag with None)."""
        return self.update_tag(key, None)
resources = [GcpBucket]
from datetime import datetime
from attrs import define
from typing import Optional, ClassVar, List, Dict, Any, Union
from resotolib.graph import Graph
from resotolib.logger import log
from resotolib.baseresources import (
BaseAccount,
BaseRegion,
BaseResource,
BaseUser,
)
from resotolib.utils import make_valid_timestamp
from github.Repository import Repository
from github.Organization import Organization
from github.NamedUser import NamedUser
from github.Clones import Clones
from github.View import View
from github.Referrer import Referrer
from github.Path import Path
from github.GithubException import GithubException
from github.PullRequest import PullRequest
@define(eq=False, slots=False)
class GithubAccount(BaseAccount):
    """Account node representing a GitHub account; not deletable via resoto."""

    kind: ClassVar[str] = "github_account"

    def delete(self, graph: Graph) -> bool:
        # Accounts are never deleted through resoto.
        return False
@define(eq=False, slots=False)
class GithubRegion(BaseRegion):
    """Synthetic region node (GitHub has no regions); not deletable via resoto."""

    kind: ClassVar[str] = "github_region"

    def delete(self, graph: Graph) -> bool:
        # Regions are synthetic and never deleted through resoto.
        return False
@define(eq=False, slots=False)
class GithubResource:
    """Mixin for all GitHub resources: deletion and tagging are not supported, so all mutators return False."""

    kind: ClassVar[str] = "github_resource"

    def delete(self, graph: Graph) -> bool:
        return False

    def update_tag(self, key: str, value: str) -> bool:
        return False

    def delete_tag(self, key: str) -> bool:
        return False
@define(eq=False, slots=False)
class GithubOrg(GithubResource, BaseResource):
    """A GitHub organization.

    Fields mirror PyGithub's Organization attributes; names like org_id,
    org_location and org_type are prefixed, presumably to avoid clashing with
    BaseResource attributes — confirm against the base class if renaming.
    """

    kind: ClassVar[str] = "github_org"
    avatar_url: Optional[str] = None
    billing_email: Optional[str] = None
    blog: Optional[str] = None
    collaborators: Optional[int] = None
    company: Optional[str] = None
    created_at: Optional[datetime] = None
    default_repository_permission: Optional[str] = None
    description: Optional[str] = None
    disk_usage: Optional[int] = None
    email: Optional[str] = None
    events_url: Optional[str] = None
    followers: Optional[int] = None
    following: Optional[int] = None
    gravatar_id: Optional[str] = None
    has_organization_projects: Optional[bool] = None
    has_repository_projects: Optional[bool] = None
    hooks_url: Optional[str] = None
    html_url: Optional[str] = None
    org_id: Optional[int] = None
    issues_url: Optional[str] = None
    org_location: Optional[str] = None
    login: Optional[str] = None
    members_can_create_repositories: Optional[bool] = None
    members_url: Optional[str] = None
    owned_private_repos: Optional[int] = None
    private_gists: Optional[int] = None
    public_gists: Optional[int] = None
    public_members_url: Optional[str] = None
    public_repos: Optional[int] = None
    repos_url: Optional[str] = None
    total_private_repos: Optional[int] = None
    two_factor_requirement_enabled: Optional[bool] = None
    org_type: Optional[str] = None
    updated_at: Optional[datetime] = None
    url: Optional[str] = None

    @staticmethod
    def new(org: Organization) -> BaseResource:
        """Build a GithubOrg from a PyGithub Organization.

        Uses the login as resource id and maps created/updated timestamps onto
        both the raw fields and the resoto ctime/mtime attributes.
        """
        return GithubOrg(
            id=str(org.login),
            name=org.name,
            avatar_url=org.avatar_url,
            billing_email=org.billing_email,
            blog=org.blog,
            collaborators=org.collaborators,
            company=org.company,
            created_at=make_valid_timestamp(org.created_at),
            ctime=make_valid_timestamp(org.created_at),
            default_repository_permission=org.default_repository_permission,
            description=org.description,
            disk_usage=org.disk_usage,
            email=org.email,
            events_url=org.events_url,
            followers=org.followers,
            following=org.following,
            gravatar_id=org.gravatar_id,
            has_organization_projects=org.has_organization_projects,
            has_repository_projects=org.has_repository_projects,
            hooks_url=org.hooks_url,
            html_url=org.html_url,
            org_id=org.id,
            issues_url=org.issues_url,
            org_location=org.location,
            login=org.login,
            members_can_create_repositories=org.members_can_create_repositories,
            members_url=org.members_url,
            owned_private_repos=org.owned_private_repos,
            private_gists=org.private_gists,
            public_gists=org.public_gists,
            public_members_url=org.public_members_url,
            public_repos=org.public_repos,
            repos_url=org.repos_url,
            total_private_repos=org.total_private_repos,
            org_type=org.type,
            updated_at=make_valid_timestamp(org.updated_at),
            mtime=make_valid_timestamp(org.updated_at),
            url=org.url,
        )
@define(eq=False, slots=False)
class GithubUser(GithubResource, BaseUser):
    """A GitHub user (NamedUser).

    Fields mirror PyGithub's NamedUser attributes; user_id, user_location and
    user_type are prefixed, presumably to avoid base-class attribute clashes.
    """

    kind: ClassVar[str] = "github_user"
    avatar_url: Optional[str] = None
    bio: Optional[str] = None
    blog: Optional[str] = None
    collaborators: Optional[int] = None
    company: Optional[str] = None
    contributions: Optional[int] = None
    created_at: Optional[datetime] = None
    disk_usage: Optional[int] = None
    email: Optional[str] = None
    events_url: Optional[str] = None
    followers: Optional[int] = None
    followers_url: Optional[str] = None
    following: Optional[int] = None
    following_url: Optional[str] = None
    gists_url: Optional[str] = None
    gravatar_id: Optional[str] = None
    hireable: Optional[bool] = None
    html_url: Optional[str] = None
    user_id: Optional[int] = None
    invitation_teams_url: Optional[str] = None
    user_location: Optional[str] = None
    login: Optional[str] = None
    name: Optional[str] = None
    # NOTE(review): GitHub's node_id is a string distinct from the numeric id,
    # but new() below assigns user.id here — confirm whether that is intended.
    node_id: Optional[int] = None
    organizations_url: Optional[str] = None
    owned_private_repos: Optional[int] = None
    private_gists: Optional[int] = None
    public_gists: Optional[int] = None
    public_repos: Optional[int] = None
    received_events_url: Optional[str] = None
    repos_url: Optional[str] = None
    role: Optional[str] = None
    site_admin: Optional[bool] = None
    starred_url: Optional[str] = None
    subscriptions_url: Optional[str] = None
    suspended_at: Optional[datetime] = None
    team_count: Optional[int] = None
    total_private_repos: Optional[int] = None
    twitter_username: Optional[str] = None
    user_type: Optional[str] = None
    updated_at: Optional[datetime] = None
    url: Optional[str] = None

    @staticmethod
    def new(user: NamedUser) -> BaseResource:
        """Build a GithubUser from a PyGithub NamedUser, using login as resource id."""
        return GithubUser(
            id=str(user.login),
            avatar_url=user.avatar_url,
            bio=user.bio,
            blog=user.blog,
            collaborators=user.collaborators,
            company=user.company,
            contributions=user.contributions,
            created_at=make_valid_timestamp(user.created_at),
            ctime=make_valid_timestamp(user.created_at),
            disk_usage=user.disk_usage,
            email=user.email,
            events_url=user.events_url,
            followers=user.followers,
            followers_url=user.followers_url,
            following=user.following,
            following_url=user.following_url,
            gists_url=user.gists_url,
            gravatar_id=user.gravatar_id,
            hireable=user.hireable,
            html_url=user.html_url,
            user_id=user.id,
            invitation_teams_url=user.invitation_teams_url,
            user_location=user.location,
            login=user.login,
            name=user.name,
            node_id=user.id,
            organizations_url=user.organizations_url,
            owned_private_repos=user.owned_private_repos,
            private_gists=user.private_gists,
            public_gists=user.public_gists,
            public_repos=user.public_repos,
            received_events_url=user.received_events_url,
            repos_url=user.repos_url,
            role=user.role,
            site_admin=user.site_admin,
            starred_url=user.starred_url,
            subscriptions_url=user.subscriptions_url,
            suspended_at=make_valid_timestamp(user.suspended_at),
            team_count=user.team_count,
            total_private_repos=user.total_private_repos,
            twitter_username=user.twitter_username,
            user_type=user.type,
            updated_at=make_valid_timestamp(user.updated_at),
            mtime=make_valid_timestamp(user.updated_at),
            url=user.url,
        )
@define(eq=False, slots=False)
class GithubRepoClones:
    """Clone statistics of a repository for a single day."""

    kind: ClassVar[str] = "github_repo_clones"
    timestamp: Optional[datetime] = None
    count: Optional[int] = None  # total clones on that day
    uniques: Optional[int] = None  # unique cloners on that day

    @staticmethod
    def new(clones: Clones) -> "GithubRepoClones":
        """Build an instance from a PyGithub Clones entry."""
        return GithubRepoClones(
            timestamp=make_valid_timestamp(clones.timestamp), count=clones.count, uniques=clones.uniques
        )
@define(eq=False, slots=False)
class GithubRepoClonesTraffic:
    """Aggregated clone traffic of a repository, with the per-day breakdown."""

    kind: ClassVar[str] = "github_repo_clones_traffic"
    count: Optional[int] = None
    uniques: Optional[int] = None
    clones: Optional[List[GithubRepoClones]] = None

    @staticmethod
    def new(clones_traffic: Optional[Dict[str, Any]]):
        """Build an instance from the raw traffic dict; a None input yields None."""
        if clones_traffic is None:
            return None
        daily = []
        for entry in clones_traffic.get("clones", []):
            daily.append(GithubRepoClones.new(entry))
        return GithubRepoClonesTraffic(
            count=clones_traffic.get("count"),
            uniques=clones_traffic.get("uniques"),
            clones=daily,
        )
@define(eq=False, slots=False)
class GithubRepoView:
    """View statistics of a repository for a single day."""

    kind: ClassVar[str] = "github_repo_view"
    timestamp: Optional[datetime] = None
    count: Optional[int] = None  # total views on that day
    uniques: Optional[int] = None  # unique visitors on that day

    @staticmethod
    def new(view: View) -> "GithubRepoView":
        """Build an instance from a PyGithub View entry."""
        return GithubRepoView(timestamp=make_valid_timestamp(view.timestamp), count=view.count, uniques=view.uniques)
@define(eq=False, slots=False)
class GithubRepoViewsTraffic:
    """Aggregated view traffic of a repository, with the per-day breakdown."""

    kind: ClassVar[str] = "github_repo_views_traffic"
    count: Optional[int] = None
    uniques: Optional[int] = None
    views: Optional[List[GithubRepoView]] = None

    @staticmethod
    def new(views_traffic: Optional[Dict[str, Any]]):
        """Build an instance from the raw traffic dict; a None input yields None."""
        if views_traffic is None:
            return None
        daily = []
        for entry in views_traffic.get("views", []):
            daily.append(GithubRepoView.new(entry))
        return GithubRepoViewsTraffic(
            count=views_traffic.get("count"),
            uniques=views_traffic.get("uniques"),
            views=daily,
        )
@define(eq=False, slots=False)
class GithubRepoTopReferrer:
    """A top referrer of a repository's traffic."""

    kind: ClassVar[str] = "github_repo_top_referrer"
    referrer: Optional[str] = None
    count: Optional[int] = None
    uniques: Optional[int] = None

    @staticmethod
    def new(referrer: Referrer) -> "GithubRepoTopReferrer":
        """Build an instance from a PyGithub Referrer entry."""
        return GithubRepoTopReferrer(referrer=referrer.referrer, count=referrer.count, uniques=referrer.uniques)
@define(eq=False, slots=False)
class GithubRepoTopPath:
    """A top content path of a repository's traffic."""

    kind: ClassVar[str] = "github_repo_top_path"
    title: Optional[str] = None
    path: Optional[str] = None
    count: Optional[int] = None
    uniques: Optional[int] = None

    @staticmethod
    def new(path: Path) -> "GithubRepoTopPath":
        """Build an instance from a PyGithub Path entry."""
        return GithubRepoTopPath(title=path.title, path=path.path, count=path.count, uniques=path.uniques)
@define(eq=False, slots=False)
class GithubPullRequest(GithubResource, BaseResource):
    """A pull request of a repository.

    Fields mirror PyGithub's PullRequest attributes; the commented-out fields
    (assignee, base, head, labels, ...) were deliberately left uncollected.
    """

    kind: ClassVar[str] = "github_pull_request"
    additions: Optional[int] = None
    # assignee: Optional[str] = None
    # assignees: Optional[List[str]] = None
    # base: Optional[str] = None
    body: Optional[str] = None
    changed_files: Optional[int] = None
    closed_at: Optional[datetime] = None
    comments: Optional[int] = None
    comments_url: Optional[str] = None
    commits: Optional[int] = None
    commits_url: Optional[str] = None
    created_at: Optional[datetime] = None
    deletions: Optional[int] = None
    diff_url: Optional[str] = None
    draft: Optional[bool] = None
    # head: Optional[str] = None
    html_url: Optional[str] = None
    pr_id: Optional[int] = None
    issue_url: Optional[str] = None
    # labels: Optional[List[str]] = None
    merge_commit_sha: Optional[str] = None
    mergeable: Optional[bool] = None
    mergeable_state: Optional[str] = None
    merged: Optional[bool] = None
    merged_at: Optional[datetime] = None
    # merged_by: Optional[str] = None
    # milestone: Optional[str] = None
    number: Optional[int] = None
    patch_url: Optional[str] = None
    rebaseable: Optional[bool] = None
    review_comments: Optional[int] = None
    review_comments_url: Optional[str] = None
    state: Optional[str] = None
    title: Optional[str] = None
    updated_at: Optional[datetime] = None
    url: Optional[str] = None
    # user: Optional[str] = None
    maintainer_can_modify: Optional[bool] = None

    @staticmethod
    def new(pr: PullRequest) -> "GithubPullRequest":
        """Build a GithubPullRequest from a PyGithub PullRequest.

        The PR number becomes the resource id and the PR title the resource name.
        """
        return GithubPullRequest(
            name=str(pr.title),
            additions=pr.additions,
            # assignee=pr.assignee,
            # assignees=pr.assignees,
            # base=pr.base,
            body=pr.body,
            changed_files=pr.changed_files,
            closed_at=make_valid_timestamp(pr.closed_at),
            comments=pr.comments,
            comments_url=pr.comments_url,
            commits=pr.commits,
            commits_url=pr.commits_url,
            created_at=make_valid_timestamp(pr.created_at),
            ctime=make_valid_timestamp(pr.created_at),
            deletions=pr.deletions,
            diff_url=pr.diff_url,
            draft=pr.draft,
            # head=pr.head,
            html_url=pr.html_url,
            pr_id=pr.id,
            issue_url=pr.issue_url,
            # labels=pr.labels,
            merge_commit_sha=pr.merge_commit_sha,
            mergeable=pr.mergeable,
            mergeable_state=pr.mergeable_state,
            merged=pr.merged,
            merged_at=make_valid_timestamp(pr.merged_at),
            # merged_by=pr.merged_by,
            # milestone=pr.milestone,
            number=pr.number,
            id=str(pr.number),
            patch_url=pr.patch_url,
            rebaseable=pr.rebaseable,
            review_comments=pr.review_comments,
            review_comments_url=pr.review_comments_url,
            state=pr.state,
            title=pr.title,
            updated_at=make_valid_timestamp(pr.updated_at),
            mtime=make_valid_timestamp(pr.updated_at),
            url=pr.url,
            # user=pr.user,
            maintainer_can_modify=pr.maintainer_can_modify,
        )
@define(eq=False, slots=False)
class GithubRepo(GithubResource, BaseResource):
    """A GitHub repository.

    Fields mirror PyGithub's Repository attributes, plus traffic statistics
    (clones/views/top referrers/top paths) fetched via the helper functions
    defined at module level.
    """

    kind: ClassVar[str] = "github_repo"
    allow_merge_commit: Optional[bool] = None
    allow_rebase_merge: Optional[bool] = None
    allow_squash_merge: Optional[bool] = None
    archived: Optional[bool] = None
    archive_url: Optional[str] = None
    assignees_url: Optional[str] = None
    blobs_url: Optional[str] = None
    branches_url: Optional[str] = None
    clone_url: Optional[str] = None
    clones_traffic: Optional[GithubRepoClonesTraffic] = None
    collaborators_url: Optional[str] = None
    comments_url: Optional[str] = None
    commits_url: Optional[str] = None
    compare_url: Optional[str] = None
    contents_url: Optional[str] = None
    contributors_count: Optional[int] = None
    contributors_url: Optional[str] = None
    created_at: Optional[datetime] = None
    default_branch: Optional[str] = None
    delete_branch_on_merge: Optional[bool] = None
    deployments_url: Optional[str] = None
    description: Optional[str] = None
    downloads_url: Optional[str] = None
    events_url: Optional[str] = None
    fork: Optional[bool] = None
    forks: Optional[int] = None
    forks_count: Optional[int] = None
    forks_url: Optional[str] = None
    full_name: Optional[str] = None
    git_commits_url: Optional[str] = None
    git_refs_url: Optional[str] = None
    git_tags_url: Optional[str] = None
    git_url: Optional[str] = None
    has_downloads: Optional[bool] = None
    has_issues: Optional[bool] = None
    has_pages: Optional[bool] = None
    has_projects: Optional[bool] = None
    has_wiki: Optional[bool] = None
    homepage: Optional[str] = None
    hooks_url: Optional[str] = None
    html_url: Optional[str] = None
    repo_id: Optional[int] = None
    issue_comment_url: Optional[str] = None
    issue_events_url: Optional[str] = None
    issues_url: Optional[str] = None
    keys_url: Optional[str] = None
    labels_url: Optional[str] = None
    language: Optional[str] = None
    languages_url: Optional[str] = None
    master_branch: Optional[str] = None
    merges_url: Optional[str] = None
    milestones_url: Optional[str] = None
    mirror_url: Optional[str] = None
    name: Optional[str] = None
    network_count: Optional[int] = None
    notifications_url: Optional[str] = None
    open_issues: Optional[int] = None
    open_issues_count: Optional[int] = None
    private: Optional[bool] = None
    pulls_url: Optional[str] = None
    pushed_at: Optional[datetime] = None
    releases_url: Optional[str] = None
    size: Optional[int] = None
    ssh_url: Optional[str] = None
    stargazers_count: Optional[int] = None
    stargazers_url: Optional[str] = None
    statuses_url: Optional[str] = None
    subscribers_count: Optional[int] = None
    subscribers_url: Optional[str] = None
    subscription_url: Optional[str] = None
    svn_url: Optional[str] = None
    tags_url: Optional[str] = None
    teams_url: Optional[str] = None
    top_paths: Optional[List[GithubRepoTopPath]] = None
    top_referrers: Optional[List[GithubRepoTopReferrer]] = None
    trees_url: Optional[str] = None
    updated_at: Optional[datetime] = None
    url: Optional[str] = None
    watchers: Optional[int] = None
    watchers_count: Optional[int] = None
    views_traffic: Optional[GithubRepoViewsTraffic] = None

    @staticmethod
    def new(repo: Repository) -> "GithubRepo":
        """Build a GithubRepo from a PyGithub Repository.

        Besides copying the plain attributes, this triggers additional API
        calls for traffic statistics and the contributor count — each
        Repository passed in therefore costs several requests.
        """
        return GithubRepo(
            id=repo.name,
            name=repo.name,
            allow_merge_commit=repo.allow_merge_commit,
            allow_rebase_merge=repo.allow_rebase_merge,
            allow_squash_merge=repo.allow_squash_merge,
            archived=repo.archived,
            archive_url=repo.archive_url,
            assignees_url=repo.assignees_url,
            blobs_url=repo.blobs_url,
            branches_url=repo.branches_url,
            clone_url=repo.clone_url,
            collaborators_url=repo.collaborators_url,
            comments_url=repo.comments_url,
            commits_url=repo.commits_url,
            compare_url=repo.compare_url,
            contents_url=repo.contents_url,
            contributors_url=repo.contributors_url,
            created_at=make_valid_timestamp(repo.created_at),
            ctime=make_valid_timestamp(repo.created_at),
            default_branch=repo.default_branch,
            delete_branch_on_merge=repo.delete_branch_on_merge,
            deployments_url=repo.deployments_url,
            description=repo.description,
            downloads_url=repo.downloads_url,
            events_url=repo.events_url,
            fork=repo.fork,
            forks=repo.forks,
            forks_count=repo.forks_count,
            forks_url=repo.forks_url,
            full_name=repo.full_name,
            git_commits_url=repo.git_commits_url,
            git_refs_url=repo.git_refs_url,
            git_tags_url=repo.git_tags_url,
            git_url=repo.git_url,
            has_downloads=repo.has_downloads,
            has_issues=repo.has_issues,
            has_pages=repo.has_pages,
            has_projects=repo.has_projects,
            has_wiki=repo.has_wiki,
            homepage=repo.homepage,
            hooks_url=repo.hooks_url,
            html_url=repo.html_url,
            repo_id=repo.id,
            issue_comment_url=repo.issue_comment_url,
            issue_events_url=repo.issue_events_url,
            issues_url=repo.issues_url,
            keys_url=repo.keys_url,
            labels_url=repo.labels_url,
            language=repo.language,
            languages_url=repo.languages_url,
            master_branch=repo.master_branch,
            merges_url=repo.merges_url,
            milestones_url=repo.milestones_url,
            mirror_url=repo.mirror_url,
            network_count=repo.network_count,
            notifications_url=repo.notifications_url,
            open_issues=repo.open_issues,
            open_issues_count=repo.open_issues_count,
            private=repo.private,
            pulls_url=repo.pulls_url,
            pushed_at=make_valid_timestamp(repo.pushed_at),
            releases_url=repo.releases_url,
            size=repo.size,
            ssh_url=repo.ssh_url,
            stargazers_count=repo.stargazers_count,
            stargazers_url=repo.stargazers_url,
            statuses_url=repo.statuses_url,
            subscribers_count=repo.subscribers_count,
            subscribers_url=repo.subscribers_url,
            subscription_url=repo.subscription_url,
            svn_url=repo.svn_url,
            tags_url=repo.tags_url,
            teams_url=repo.teams_url,
            trees_url=repo.trees_url,
            updated_at=make_valid_timestamp(repo.updated_at),
            mtime=make_valid_timestamp(repo.updated_at),
            url=repo.url,
            watchers=repo.watchers,
            watchers_count=repo.watchers_count,
            # Traffic helpers swallow GithubException and return None/[] on failure.
            clones_traffic=GithubRepoClonesTraffic.new(get_clones_traffic(repo)),
            views_traffic=GithubRepoViewsTraffic.new(get_views_traffic(repo)),
            top_referrers=[GithubRepoTopReferrer.new(referrer) for referrer in get_top_referrers(repo)],
            top_paths=[GithubRepoTopPath.new(path) for path in get_top_paths(repo)],
            # NOTE(review): this materializes the full contributor list just to
            # count it, and is not wrapped in try/except like the traffic calls.
            contributors_count=len(list(repo.get_contributors())),
        )
def get_clones_traffic(repo: Repository) -> Optional[Dict[str, Union[int, List[Clones]]]]:
    """Fetch the clone traffic of *repo*; return None when the API call fails."""
    try:
        traffic = repo.get_clones_traffic()
    except GithubException as e:
        log.debug(f"Failed to get clones traffic for {repo.full_name}: {e}")
        return None
    return traffic
def get_views_traffic(repo: Repository) -> Optional[Dict[str, Union[int, List[View]]]]:
    """Fetch the view traffic of *repo*; return None when the API call fails."""
    try:
        traffic = repo.get_views_traffic()
    except GithubException as e:
        log.debug(f"Failed to get views traffic for {repo.full_name}: {e}")
        return None
    return traffic
def get_top_referrers(repo: Repository) -> List[Referrer]:
    """Fetch the top referrers of *repo*; return an empty list when the API call fails."""
    try:
        referrers = repo.get_top_referrers()
    except GithubException as e:
        log.debug(f"Failed to get top referrers for {repo.full_name}: {e}")
        return []
    return referrers
def get_top_paths(repo: Repository) -> List[Path]:
    """Fetch the top content paths of *repo*; return an empty list when the API call fails."""
    try:
        paths = repo.get_top_paths()
    except GithubException as e:
        log.debug(f"Failed to get top paths for {repo.full_name}: {e}")
        return []
    return paths
import logging
from abc import ABC, abstractmethod
from functools import cached_property
from tempfile import TemporaryDirectory
from textwrap import dedent
from threading import RLock
from typing import ClassVar, TypeVar, Any, Callable
from typing import List, Type, Optional, Tuple, Dict
import yaml
from attrs import define, field
from kubernetes.client import ApiClient, Configuration, ApiException
from kubernetes.config import load_kube_config, list_kube_config_contexts
from resotolib.baseresources import BaseResource, EdgeType
from resotolib.config import Config
from resotolib.core.actions import CoreFeedback
from resotolib.graph import Graph
from resotolib.json import from_json as from_js
from resotolib.json_bender import S, bend, Bender, Sort, AsDate
from resotolib.proc import num_default_threads
from resotolib.types import Json
from resotolib.utils import rnd_str
log = logging.getLogger("resoto.plugins.k8s")
SortTransitionTime = Sort(S("lastTransitionTime") >> AsDate())
@define(eq=False, slots=False)
class KubernetesResource(BaseResource):
    """Base class of all collected Kubernetes resources.

    Maps the common `metadata` section of a Kubernetes object into resoto's
    base attributes and provides tag/delete operations backed by the
    cluster's API client.
    """

    kind: ClassVar[str] = "kubernetes_resource"
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("metadata", "uid"),
        # Annotations are surfaced as resoto tags; labels get their own field below.
        "tags": S("metadata", "annotations", default={}),
        "name": S("metadata", "name"),
        "ctime": S("metadata", "creationTimestamp"),
        # mtime: lastTransitionTime of the most recent status condition
        # (conditions sorted by transition time, last entry taken).
        "mtime": (S("status", "conditions") >> SortTransitionTime)[-1]["lastTransitionTime"],
        "resource_version": S("metadata", "resourceVersion"),
        "namespace": S("metadata", "namespace"),
        "labels": S("metadata", "labels", default={}),
    }
    resource_version: Optional[str] = None
    namespace: Optional[str] = None  # None for cluster-scoped resources
    labels: Dict[str, str] = field(factory=dict)

    @classmethod
    def from_json(cls: Type["KubernetesResource"], json: Json) -> "KubernetesResource":
        """Create an instance of this class from raw Kubernetes API JSON."""
        mapped = bend(cls.mapping, json)
        return from_js(mapped, cls)

    @classmethod
    def k8s_name(cls: Type["KubernetesResource"]) -> str:
        """The Kubernetes kind name, derived by stripping the 'Kubernetes' class prefix."""
        return cls.__name__.removeprefix("Kubernetes")

    def api_client(self) -> "K8sClient":
        """Return the API client for the cluster (account) this resource belongs to."""
        if account := self.account():
            account_id = account.id
            if cfg := K8sConfig.current_config():
                return cfg.client_for(account_id)
        raise AttributeError(f"No API client for account: {account} or no client for account.")

    def update_tag(self, key: str, value: str) -> bool:
        """Set an annotation on the resource via a strategic merge patch."""
        self.api_client().patch_resource(
            self.__class__, self.namespace, self.name, {"metadata": {"annotations": {key: value}}}
        )
        return True

    def delete_tag(self, key: str) -> bool:
        """Remove an annotation by patching it to None (Kubernetes deletes null-valued keys)."""
        self.api_client().patch_resource(
            self.__class__, self.namespace, self.name, {"metadata": {"annotations": {key: None}}}
        )
        return True

    def delete(self, graph: Graph) -> bool:
        """Delete the resource in the cluster."""
        self.api_client().delete_resource(self.__class__, self.namespace, self.name)
        return True

    def connect_in_graph(self, builder: Any, source: Json) -> None:
        # https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/
        # For each ownerReference: owner -> self (default edge); if the reference
        # blocks owner deletion, additionally self -> owner as a delete edge so
        # resoto deletes the dependent first.
        for ref in bend(S("metadata", "ownerReferences", default=[]), source):
            owner = builder.node(id=ref["uid"])
            block_owner_deletion = ref.get("blockOwnerDeletion", False)
            if owner:
                log.debug(f"Add owner reference from {owner} -> {self}")
                builder.graph.add_edge(owner, self, edge_type=EdgeType.default)
                if block_owner_deletion:
                    builder.graph.add_edge(self, owner, edge_type=EdgeType.delete)

    def __str__(self) -> str:
        return f"{self.kind}:{self.name}"
# Type variable bound to KubernetesResource: lets client methods return the concrete subclass.
KubernetesResourceType = TypeVar("KubernetesResourceType", bound=KubernetesResource)
# Kinds that are always collected, regardless of collect/no_collect settings (see K8sConfig.is_allowed).
AlwaysAllowed = {"kubernetes_namespace"}
@define
class K8sAccess:
    """Direct access definition for one cluster (server url + bearer token).

    ``to_json`` renders this as a complete single-context kubeconfig document.
    """

    kind: ClassVar[str] = "k8s_access"
    name: str = field(metadata={"description": "The name of the kubernetes cluster."})
    server: str = field(metadata={"description": "The url of the server to connect to."})
    token: str = field(metadata={"description": "The user access token to use to access this cluster."})
    certificate_authority_data: Optional[str] = field(
        default=None, metadata={"description": "Optional CA certificate string."}
    )

    def to_json(self) -> Json:
        """Render this access definition as a kubeconfig document."""
        user_name = "access" + self.name
        cluster: Json = {"server": self.server}
        if self.certificate_authority_data:
            cluster["certificate-authority-data"] = self.certificate_authority_data
        return {
            "apiVersion": "v1",
            "kind": "Config",
            "clusters": [{"cluster": cluster, "name": self.name}],
            "contexts": [{"context": {"cluster": self.name, "user": user_name}, "name": self.name}],
            "current-context": self.name,
            "preferences": {},
            "users": [{"name": user_name, "user": {"token": self.token}}],
        }
@define
class K8sConfigFile:
    """Reference to a kubeconfig file on disk plus the contexts to use from it."""

    kind: ClassVar[str] = "k8s_config_file"
    path: str = field(metadata={"description": "Path to the kubeconfig file."})
    contexts: List[str] = field(
        factory=list,
        metadata={
            "description": "The contexts to use in the specified config file.\n"
            "You can also set all_contexts to true to use all contexts."
        },
    )
    # When true, the explicit contexts list is ignored and every context is used.
    all_contexts: bool = field(
        default=True,
        metadata={"description": "Collect all contexts found in the kubeconfig file."},
    )
@define(slots=False)
class K8sConfig:
    """Configuration of the kubernetes collector plugin.

    Cluster access is defined either inline as raw kubeconfig JSON (``configs``)
    or via kubeconfig files on disk (``config_files``). API clients are created
    lazily on first use and cached per cluster name.
    """

    kind: ClassVar[str] = "k8s"
    configs: List[Json] = field(
        factory=list,
        metadata={
            "description": "List of kubernetes configurations. "
            "Copy and paste your k8s configuration file here as one entry."
        },
    )
    config_files: List[K8sConfigFile] = field(
        factory=list,
        metadata={
            "description": dedent(
                """
                Configure access via kubeconfig files.
                Structure:
                - path: "/path/to/kubeconfig"
                  all_contexts: false
                  contexts: ["context1", "context2"]
                """
            ).strip()
        },
    )
    collect: List[str] = field(
        factory=list,
        metadata={"description": "Objects to collect (default: all)"},
    )
    no_collect: List[str] = field(
        factory=list,
        metadata={"description": "Objects to exclude (default: none)"},
    )
    pool_size: int = field(
        factory=num_default_threads,
        metadata={"description": "Thread/process pool size"},
    )
    fork_process: bool = field(
        default=False,
        metadata={"description": "Fork collector process instead of using threads"},
    )
    # Transient runtime state: created lazily, never serialized (see __getstate__).
    _clients: Optional[Dict[str, "K8sClient"]] = None
    _temp_dir: Optional[TemporaryDirectory[str]] = None
    _lock: RLock = field(factory=RLock)

    def __getstate__(self) -> Dict[str, Any]:
        # Locks, temp directories and live API clients cannot be pickled - drop them.
        d = self.__dict__.copy()
        d.pop("_lock", None)
        d.pop("_temp_dir", None)
        d.pop("_clients", None)
        return d

    def __setstate__(self, d: Dict[str, Any]) -> None:
        # Recreate the transient state that __getstate__ removed, so attribute
        # access works on unpickled instances.
        d["_lock"] = RLock()
        d.setdefault("_clients", None)
        d.setdefault("_temp_dir", None)
        self.__dict__.update(d)

    def is_allowed(self, kind: str) -> bool:
        """True if resources of this kind should be collected (AlwaysAllowed wins)."""
        return kind in AlwaysAllowed or ((not self.collect or kind in self.collect) and kind not in self.no_collect)

    def cluster_access_configs(
        self, tmp_dir: str, core_feedback: Optional[CoreFeedback] = None
    ) -> Dict[str, Configuration]:
        """Materialize all configured access definitions as kubernetes Configurations.

        Inline ``configs`` entries are written as kubeconfig files into tmp_dir
        so that a single loader path handles both styles. Errors while loading a
        file are reported (via core_feedback, if given) and the file is skipped.
        """
        with self._lock:
            result: Dict[str, Configuration] = {}
            # Work on a copy: this method must not mutate the configured file
            # list, otherwise temp-file entries accumulate on repeated calls.
            cfg_files = list(self.config_files)
            # write all access configs as kubeconfig file and let the loader handle it
            for ca in self.configs:
                filename = tmp_dir + "/kube_config_" + rnd_str() + ".yaml"
                with open(filename, "w") as f:
                    f.write(yaml.dump(ca))
                cfg_files.append(K8sConfigFile(path=filename))

            def load_context(path: Optional[str], cf_contexts: List[str], cf_all_contexts: bool) -> None:
                try:
                    all_contexts, active_context = list_kube_config_contexts(path)
                    contexts = (
                        all_contexts if cf_all_contexts else [a for a in all_contexts if a["name"] in cf_contexts]
                    )
                    for ctx in contexts:
                        name = ctx["name"]
                        config = Configuration()
                        load_kube_config(path, name, client_configuration=config)
                        result[name] = config
                except Exception as e:
                    msg = f"Failed to load kubeconfig from file {path}: {e}"
                    if core_feedback:
                        core_feedback.error(msg)
                    log.error(msg)

            # load all kubeconfig files if given - otherwise use the default kubeconfig loader
            if cfg_files:
                for cf in cfg_files:
                    load_context(cf.path, cf.contexts, cf.all_contexts)
            else:
                load_context(None, [], True)
            return result

    def client_for(self, cluster_id: str, **kwargs: Any) -> "K8sClient":
        """Return the (lazily created) API client for the given cluster.

        The first call creates clients for all configured clusters; subsequent
        calls are lookups only. Raises ValueError for unknown cluster ids.
        """
        # check if clients are already initialized (double-checked under the lock)
        if not self._clients:
            with self._lock:
                if not self._clients:
                    if self._temp_dir is None:
                        self._temp_dir = TemporaryDirectory()
                    cfgs = self.cluster_access_configs(self._temp_dir.name)
                    factory = kwargs.get("client_factory", K8sApiClient.from_config)
                    # Each client is created with its own cluster name - not the id
                    # of whichever cluster happened to trigger initialization.
                    self._clients = {name: factory(name, config) for name, config in cfgs.items()}
        if cluster_id not in self._clients:
            raise ValueError(f"No access config for cluster {cluster_id}")
        return self._clients[cluster_id]

    @staticmethod
    def current_config() -> Optional["K8sConfig"]:
        """Return the active K8sConfig from the running configuration, if any."""
        cfg = Config.running_config.data.get(K8sConfig.kind)
        if isinstance(cfg, K8sConfig):
            return cfg
        return None

    @staticmethod
    def from_json(json: Json) -> "K8sConfig":
        """Create a config from JSON, migrating old (v1) configuration layouts."""
        v1 = ["token", "context", "cluster", "apiserver", "config"]

        def migrate_access(js: Json) -> Json:
            # rewrite an old access entry into a kubeconfig document
            return from_js(js, K8sAccess).to_json()

        def at(ls: List[str], idx: int) -> str:
            return ls[idx] if len(ls) > idx else ""

        if any(k in json for k in v1):
            log.info("Migrate k8s configuration from v1")
            config = json.get("config", []) or []
            cluster = json.get("cluster", []) or []
            apiserver = json.get("apiserver", []) or []
            token = json.get("token", []) or []
            cacert = json.get("cacert", []) or []
            context = json.get("context", []) or []
            access = [
                K8sAccess(at(cluster, i), at(apiserver, i), at(token, i), at(cacert, i)).to_json()
                for i in range(len(cluster))
            ]
            files = [
                K8sConfigFile(at(config, i), [at(context, i)], json.get("all_contexts", False))
                for i in range(len(config))
            ]
            return K8sConfig(
                configs=access,
                config_files=files,
                collect=json.get("collect", []),
                no_collect=json.get("no_collect", []),
                pool_size=json.get("pool_size", num_default_threads()),
                fork_process=json.get("fork_process", False),
            )
        else:
            # migrate k8s access to kubeconfig format if necessary
            json["configs"] = [i if i.get("name") is None else migrate_access(i) for i in json.get("configs", [])]
            return from_js(json, K8sConfig)
@define
class K8sApiResource:
    """Describes one listable resource endpoint served by the cluster API."""

    base: str
    name: str
    kind: str
    namespaced: bool
    verbs: List[str]

    @property
    def list_path(self) -> str:
        """API path used to list all objects of this resource."""
        return f"{self.base}/{self.name}"
class K8sClient(ABC):
    """Abstract access layer to a single kubernetes cluster API."""

    @abstractmethod
    def call_api(
        self, method: str, path: str, body: Optional[Json] = None, headers: Optional[Dict[str, str]] = None
    ) -> Json:
        """Perform a raw HTTP call against the cluster API and return the JSON body."""
        pass

    @property
    @abstractmethod
    def cluster_id(self) -> str:
        """Identifier of the cluster this client talks to."""
        pass

    @property
    @abstractmethod
    def host(self) -> str:
        """Base URL of the API server."""
        pass

    @abstractmethod
    def with_feedback(self, core_feedback: CoreFeedback) -> "K8sClient":
        """Return a client that reports problems via the given feedback channel."""
        pass

    def get(self, path: str) -> Json:
        return self.call_api("GET", path)

    def patch(self, path: str, js: Json) -> Json:
        # strategic merge patch: also allows removing keys by setting them to None
        return self.call_api("PATCH", path, js, {"Content-Type": "application/strategic-merge-patch+json"})

    def delete(self, path: str) -> Json:
        return self.call_api("DELETE", path)

    def __api_for_kind(self, kind: str) -> Optional[K8sApiResource]:
        # Linear scan over the (small) list of known APIs.
        for api in self.apis:
            if api.kind == kind:
                return api
        return None

    def __resource_path(
        self, clazz: Type[KubernetesResourceType], namespace: Optional[str] = None, name: Optional[str] = None
    ) -> Optional[str]:
        """Build the API path of a single object, or None if the kind is unknown."""
        if api := self.__api_for_kind(clazz.k8s_name()):
            if api.namespaced:
                assert namespace is not None, "No namespace provided, but resource is namespaced"
            assert name is not None, "No name given for resource"
            ns = f"/namespaces/{namespace}/" if namespace else "/"
            return f"{api.base}{ns}{api.name}/{name}"
        return None

    def patch_resource(
        self, clazz: Type[KubernetesResourceType], namespace: Optional[str], name: Optional[str], patch: Json
    ) -> Optional[KubernetesResourceType]:
        """Apply a strategic merge patch and return the updated resource."""
        if path := self.__resource_path(clazz, namespace, name):
            patched = self.patch(path, patch)
            return clazz.from_json(patched)  # type: ignore
        raise AttributeError(f"No api available for this resource type: {clazz}")

    def get_resource(
        self, clazz: Type[KubernetesResourceType], namespace: Optional[str], name: Optional[str]
    ) -> Optional[KubernetesResourceType]:
        """Fetch a single resource, or None if the kind is not served by this cluster."""
        if path := self.__resource_path(clazz, namespace, name):
            return clazz.from_json(self.get(path))  # type: ignore
        return None

    def delete_resource(
        self, clazz: Type[KubernetesResourceType], namespace: Optional[str], name: Optional[str]
    ) -> None:
        """Delete a single resource; a no-op if the kind is not served by this cluster."""
        if path := self.__resource_path(clazz, namespace, name):
            self.delete(path)

    @abstractmethod
    def version(self) -> Json:
        """Return the cluster version information."""
        pass

    @property
    @abstractmethod
    def apis(self) -> List[K8sApiResource]:
        """All listable resource APIs served by this cluster."""
        pass

    @abstractmethod
    def list_resources(
        self, resource: K8sApiResource, clazz: Type[KubernetesResourceType], path: Optional[str] = None
    ) -> List[Tuple[KubernetesResourceType, Json]]:
        """List all objects of a resource as (parsed, raw-json) pairs."""
        pass

    @staticmethod
    def filter_apis(apis: List[K8sApiResource]) -> List[K8sApiResource]:
        """
        K8s serves multiple apis for the same resource.
        Example:
            Ingress: served via '/apis/networking.k8s.io/v1' and '/apis/extensions/v1beta1' -> use the former
            Event: served via '/api/v1' and '/apis/events.k8s.io/v1' -> use the latter
        """
        known: Dict[str, K8sApiResource] = {}

        def choose(
            left: K8sApiResource, right: K8sApiResource, fns: List[Callable[[K8sApiResource], int]]
        ) -> K8sApiResource:
            # Apply scoring functions in order; the api with the LOWER score wins.
            # With the fns used below: non-beta beats beta, then the longer base
            # path (more specific api group) beats the shorter one.
            for fn in fns:
                rl = fn(left)
                rr = fn(right)
                if res := right if rl > rr else left if rl < rr else None:
                    return res
            # left and right match
            log.warning(
                "Multiple apis available for the same k8s resource type."
                f"Kind: {left.kind} Left: {left.base} <-> {right.base}. Use {left.base}."
            )
            return left

        for api in apis:
            # If the already known entry is a beta api (or unknown), the new entry
            # simply replaces it; otherwise both candidates are scored via choose().
            if api.kind in known and "beta" not in known[api.kind].base:
                known[api.kind] = choose(
                    api, known[api.kind], [lambda x: 1 if "beta" in x.base else 0, lambda x: -len(x.base)]
                )
            else:
                known[api.kind] = api
        return list(known.values())
class K8sApiClient(K8sClient):
    """K8sClient implementation on top of the official kubernetes ApiClient."""

    def __init__(self, cluster_id: str, api_client: ApiClient, core_feedback: Optional[CoreFeedback] = None):
        self._cluster_id = cluster_id
        self.api_client = api_client
        self.core_feedback = core_feedback

    def with_feedback(self, core_feedback: CoreFeedback) -> "K8sClient":
        """Return a copy of this client that reports problems to core_feedback."""
        return K8sApiClient(self._cluster_id, self.api_client, core_feedback)

    def call_api(
        self, method: str, path: str, body: Optional[Json] = None, headers: Optional[Dict[str, str]] = None
    ) -> Json:
        """Perform a raw API call and return the response body parsed as JSON."""
        log.debug(f"Send request to k8s {method} {path}. body={body}")
        result, code, header = self.api_client.call_api(
            path,
            method,
            auth_settings=["BearerToken"],
            response_type="object",
            body=body,
            header_params=headers,
        )
        log.debug(f"Response from {method} {path} {code}: {header}")
        return result  # type: ignore

    @property
    def cluster_id(self) -> str:
        return self._cluster_id

    @property
    def host(self) -> str:
        return self.api_client.configuration.host  # type: ignore

    def version(self) -> Json:
        return self.get("/version")

    @cached_property
    def apis(self) -> List[K8sApiResource]:
        """Fetch and cache all listable resource APIs served by this cluster."""
        result: List[K8sApiResource] = []

        def add_resource(base: str, js: Json) -> None:
            name = js["name"]
            verbs = js["verbs"]
            # Skip sub-resources (names containing '/') and resources without a list verb.
            if "/" not in name and "list" in verbs:
                result.append(K8sApiResource(base, name, js["kind"], js["namespaced"], verbs))

        # core ("legacy") api group
        old_apis = self.get("/api/v1")
        for resource in old_apis["resources"]:
            add_resource("/api/v1", resource)
        # all named api groups, using each group's preferred version
        apis = self.get("/apis")
        for group in apis["groups"]:
            part = f'/apis/{group["preferredVersion"]["groupVersion"]}'
            try:
                resources = self.get(part)
                for resource in resources["resources"]:
                    add_resource(part, resource)
            except ApiException as ex:
                msg = f"Failed to retrieve resource APIs for {part}. Reason: {ex}. Ignore."
                if self.core_feedback:
                    self.core_feedback.error(msg)
                log.warning(msg)
        return self.filter_apis(result)

    def list_resources(
        self, resource: K8sApiResource, clazz: Type[KubernetesResourceType], path: Optional[str] = None
    ) -> List[Tuple[KubernetesResourceType, Json]]:
        """List all objects of a resource; API errors are reported and yield []."""
        try:
            result = self.get(path or resource.list_path)
            return [(clazz.from_json(r), r) for r in result.get("items", [])]  # type: ignore
        except ApiException as ex:
            msg = f"Failed to list resources: {resource.kind} on {resource.base}. Reason: {ex}. Ignore."
            if self.core_feedback:
                self.core_feedback.info(msg)
            log.warning(msg)
            return []

    @staticmethod
    def from_config(cluster_id: str, cluster_config: Configuration) -> "K8sApiClient":
        """Create a client for the given cluster from a kubernetes Configuration."""
        # Note: the original last line carried fused non-code residue; cleaned here.
        return K8sApiClient(cluster_id, ApiClient(cluster_config))
import logging
from threading import Lock
from attrs import define, field
from datetime import datetime
from typing import ClassVar, Optional, Dict, Type, List, Any, Union, Tuple, Set
from collections import defaultdict
from resoto_plugin_k8s.base import KubernetesResource, SortTransitionTime
from resotolib.baseresources import (
BaseAccount,
BaseInstance,
BaseRegion,
InstanceStatus,
BaseVolume,
BaseQuota,
BaseLoadBalancer,
EdgeType,
VolumeStatus,
ModelReference,
)
from resotolib.graph import Graph
from resotolib.json_bender import StringToUnitNumber, CPUCoresToNumber, Bend, F, S, K, bend, ForallBend, Bender, MapEnum
from resotolib.types import Json
# Module level logger for the kubernetes resource models.
log = logging.getLogger("resoto.plugins.k8s")
class GraphBuilder:
    """Thread-safe helper to add kubernetes nodes and edges to the resoto graph."""

    def __init__(self, graph: Graph):
        self.graph = graph
        self.name = getattr(graph.root, "name", "unknown")
        self.graph_nodes_access = Lock()
        self.graph_edges_access = Lock()

    def node(self, clazz: Optional[Type[KubernetesResource]] = None, **attrs: Any) -> Optional[KubernetesResource]:
        """Find the first node whose attributes match all given keyword args.

        If a KubernetesResource is passed directly under the ``node`` key it is
        returned as-is without searching the graph.
        """
        direct = attrs.get("node")
        if isinstance(direct, KubernetesResource):
            return direct
        with self.graph_nodes_access:
            for candidate in self.graph:
                if clazz and not isinstance(candidate, clazz):
                    continue
                if all(getattr(candidate, key, None) == value for key, value in attrs.items()):
                    return candidate  # type: ignore
            return None

    def add_node(self, node: KubernetesResource, **kwargs: Any) -> None:
        """Add a single node to the graph."""
        log.debug(f"{self.name}: add node {node}")
        with self.graph_nodes_access:
            self.graph.add_node(node, **kwargs)

    def add_edge(
        self, from_node: KubernetesResource, edge_type: EdgeType, reverse: bool = False, **to_node: Any
    ) -> None:
        """Add an edge from from_node to the node matching to_node (if found)."""
        target = self.node(**to_node)
        if not target:
            return
        start, end = (target, from_node) if reverse else (from_node, target)
        log.debug(f"{self.name}: add edge: {start} -> {end}")
        with self.graph_edges_access:
            self.graph.add_edge(start, end, edge_type=edge_type)

    def add_edges_from_selector(
        self,
        from_node: KubernetesResource,
        edge_type: EdgeType,
        selector: Dict[str, str],
        clazz: Optional[Union[type, Tuple[type, ...]]] = None,
    ) -> None:
        """Add edges to every node whose labels contain all selector entries."""
        with self.graph_nodes_access:
            for candidate in self.graph:
                if clazz and not isinstance(candidate, clazz):
                    continue
                if candidate == from_node:
                    continue
                # subset check: all selector pairs must be present in the labels
                if not (selector.items() <= candidate.labels.items()):
                    continue
                log.debug(f"{self.name}: add edge from selector: {from_node} -> {candidate}")
                with self.graph_edges_access:
                    self.graph.add_edge(from_node, candidate, edge_type=edge_type)

    def connect_volumes(self, from_node: KubernetesResource, volumes: List[Json]) -> None:
        """Connect a workload to the claims/configmaps/secrets its volumes reference."""
        # (volume json key, name field inside it, resource class to connect to)
        direct_sources = (
            ("persistentVolumeClaim", "claimName", KubernetesPersistentVolumeClaim),
            ("configMap", "name", KubernetesConfigMap),
            ("secret", "secretName", KubernetesSecret),
        )
        for volume in volumes:
            handled = False
            for key, name_prop, clazz in direct_sources:
                if key in volume:
                    handled = True
                    if name := bend(S(key, name_prop), volume):
                        self.add_edge(
                            from_node,
                            EdgeType.default,
                            name=name,
                            namespace=from_node.namespace,
                            clazz=clazz,
                        )
                    break
            if not handled and "projected" in volume:
                # a projected volume bundles several sources - recurse into them
                if sources := bend(S("projected", "sources"), volume):
                    self.connect_volumes(from_node, sources)
# region node
@define(eq=False, slots=False)
class KubernetesNodeStatusAddresses:
    """One address entry of a node status, extracted from the raw API JSON."""

    kind: ClassVar[str] = "kubernetes_node_status_addresses"
    mapping: ClassVar[Dict[str, Bender]] = {
        "address": S("address"),
        "type": S("type"),
    }
    address: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesNodeCondition:
    """A single condition entry of a node status."""

    kind: ClassVar[str] = "kubernetes_node_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_heartbeat_time": S("lastHeartbeatTime"),
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_heartbeat_time: Optional[datetime] = field(default=None)
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesNodeStatusConfigSource:
    """ConfigMap reference inside a node's config source."""

    kind: ClassVar[str] = "kubernetes_node_status_config_active_configmap"
    mapping: ClassVar[Dict[str, Bender]] = {
        "kubelet_config_key": S("kubeletConfigKey"),
        "name": S("name"),
        "namespace": S("namespace"),
        "resource_version": S("resourceVersion"),
        "uid": S("uid"),
    }
    kubelet_config_key: Optional[str] = field(default=None)
    name: Optional[str] = field(default=None)
    namespace: Optional[str] = field(default=None)
    resource_version: Optional[str] = field(default=None)
    uid: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesNodeConfigSource:
    """Wrapper for the configMap entry of a node config source."""

    kind: ClassVar[str] = "kubernetes_node_status_config_active"
    mapping: ClassVar[Dict[str, Bender]] = {
        "config_map": S("configMap") >> Bend(KubernetesNodeStatusConfigSource.mapping),
    }
    config_map: Optional[KubernetesNodeStatusConfigSource] = field(default=None)
@define(eq=False, slots=False)
class KubernetesNodeStatusConfig:
    """Active/assigned node configuration and a possible config error message."""

    kind: ClassVar[str] = "kubernetes_node_status_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "active": S("active") >> Bend(KubernetesNodeConfigSource.mapping),
        "assigned": S("assigned") >> Bend(KubernetesNodeConfigSource.mapping),
        "error": S("error"),
    }
    active: Optional[KubernetesNodeConfigSource] = field(default=None)
    assigned: Optional[KubernetesNodeConfigSource] = field(default=None)
    error: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesDaemonEndpoint:
    """Port of a daemon endpoint (note the capitalized 'Port' key in the JSON)."""

    kind: ClassVar[str] = "kubernetes_daemon_endpoint"
    mapping: ClassVar[Dict[str, Bender]] = {
        "port": S("Port"),
    }
    port: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class KubernetesNodeDaemonEndpoint:
    """Daemon endpoints of a node (currently only the kubelet endpoint)."""

    kind: ClassVar[str] = "kubernetes_node_daemon_endpoint"
    mapping: ClassVar[Dict[str, Bender]] = {
        "kubelet_endpoint": S("kubeletEndpoint") >> Bend(KubernetesDaemonEndpoint.mapping),
    }
    kubelet_endpoint: Optional[KubernetesDaemonEndpoint] = field(default=None)
@define(eq=False, slots=False)
class KubernetesNodeStatusImages:
    """A container image present on a node (all known names plus size)."""

    kind: ClassVar[str] = "kubernetes_node_status_images"
    mapping: ClassVar[Dict[str, Bender]] = {
        "names": S("names", default=[]),
        "size_bytes": S("sizeBytes", default=0),
    }
    names: List[str] = field(factory=list)
    size_bytes: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class KubernetesNodeSystemInfo:
    """System/version information reported in a node's status.nodeInfo."""

    kind: ClassVar[str] = "kubernetes_node_system_info"
    mapping: ClassVar[Dict[str, Bender]] = {
        "architecture": S("architecture"),
        "boot_id": S("bootID"),
        "container_runtime_version": S("containerRuntimeVersion"),
        "kernel_version": S("kernelVersion"),
        "kube_proxy_version": S("kubeProxyVersion"),
        "kubelet_version": S("kubeletVersion"),
        "machine_id": S("machineID"),
        "operating_system": S("operatingSystem"),
        "os_image": S("osImage"),
        "system_uuid": S("systemUUID"),
    }
    architecture: Optional[str] = field(default=None)
    boot_id: Optional[str] = field(default=None)
    container_runtime_version: Optional[str] = field(default=None)
    kernel_version: Optional[str] = field(default=None)
    kube_proxy_version: Optional[str] = field(default=None)
    kubelet_version: Optional[str] = field(default=None)
    machine_id: Optional[str] = field(default=None)
    operating_system: Optional[str] = field(default=None)
    os_image: Optional[str] = field(default=None)
    system_uuid: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesAttachedVolume:
    """A volume attached to a node (name plus device path)."""

    kind: ClassVar[str] = "kubernetes_attached_volume"
    mapping: ClassVar[Dict[str, Bender]] = {
        "device_path": S("devicePath"),
        "name": S("name"),
    }
    device_path: Optional[str] = field(default=None)
    name: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesNodeStatus:
    """Status section of a node, extracted from the raw API JSON."""

    kind: ClassVar[str] = "kubernetes_node_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "addresses": S("addresses", default=[]) >> ForallBend(KubernetesNodeStatusAddresses.mapping),
        # conditions are sorted by lastTransitionTime before being parsed
        "conditions": S("conditions", default=[]) >> SortTransitionTime >> ForallBend(KubernetesNodeCondition.mapping),
        "config": S("config") >> Bend(KubernetesNodeStatusConfig.mapping),
        "capacity": S("capacity"),
        "daemon_endpoints": S("daemonEndpoints") >> Bend(KubernetesNodeDaemonEndpoint.mapping),
        "images": S("images", default=[]) >> ForallBend(KubernetesNodeStatusImages.mapping),
        "node_info": S("nodeInfo") >> Bend(KubernetesNodeSystemInfo.mapping),
        "phase": S("phase"),
        "volumes_attached": S("volumesAttached", default=[]) >> ForallBend(KubernetesAttachedVolume.mapping),
        "volumes_in_use": S("volumesInUse", default=[]),
    }
    addresses: List[KubernetesNodeStatusAddresses] = field(factory=list)
    capacity: Optional[Any] = field(default=None)
    conditions: List[KubernetesNodeCondition] = field(factory=list)
    config: Optional[KubernetesNodeStatusConfig] = field(default=None)
    daemon_endpoints: Optional[KubernetesNodeDaemonEndpoint] = field(default=None)
    images: List[KubernetesNodeStatusImages] = field(factory=list)
    node_info: Optional[KubernetesNodeSystemInfo] = field(default=None)
    phase: Optional[str] = field(default=None)
    volumes_attached: List[KubernetesAttachedVolume] = field(factory=list)
    volumes_in_use: List[str] = field(factory=list)
@define
class KubernetesTaint:
    """A taint applied to a node spec."""

    kind: ClassVar[str] = "kubernetes_taint"
    mapping: ClassVar[Dict[str, Bender]] = {
        "effect": S("effect"),
        "key": S("key"),
        "time_added": S("timeAdded"),
        "value": S("value"),
    }
    effect: Optional[str] = field(default=None)
    key: Optional[str] = field(default=None)
    time_added: Optional[datetime] = field(default=None)
    value: Optional[str] = field(default=None)
@define
class KubernetesNodeSpec:
    """Spec section of a node, extracted from the raw API JSON."""

    kind: ClassVar[str] = "kubernetes_node_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "external_id": S("externalID"),
        "pod_cidr": S("podCIDR"),
        "pod_cidrs": S("podCIDRs", default=[]),
        "provider_id": S("providerID"),
        "taints": S("taints", default=[]) >> ForallBend(KubernetesTaint.mapping),
        "unschedulable": S("unschedulable"),
    }
    external_id: Optional[str] = field(default=None)
    pod_cidr: Optional[str] = field(default=None)
    pod_cidrs: List[str] = field(factory=list)
    provider_id: Optional[str] = field(default=None)
    taints: List[KubernetesTaint] = field(factory=list)
    unschedulable: Optional[bool] = field(default=None)
# Maps a Kubernetes "phase" string to the corresponding resoto InstanceStatus.
# NOTE(review): not referenced within this part of the file - confirm usage downstream.
instance_status_map: Dict[str, InstanceStatus] = {
    "Pending": InstanceStatus.BUSY,
    "Running": InstanceStatus.RUNNING,
    "Failed": InstanceStatus.TERMINATED,
    "Succeeded": InstanceStatus.STOPPED,
    "Unknown": InstanceStatus.UNKNOWN,
}
@define(eq=False, slots=False)
class KubernetesNode(KubernetesResource, BaseInstance):
    """A kubernetes cluster node, also modeled as a resoto BaseInstance."""

    kind: ClassVar[str] = "kubernetes_node"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "node_status": S("status") >> Bend(KubernetesNodeStatus.mapping),
        "node_spec": S("spec") >> Bend(KubernetesNodeSpec.mapping),
        "provider_id": S("spec", "providerID"),
        # capacity strings are normalized: cpu to core count, memory to GiB
        "instance_cores": S("status", "capacity", "cpu") >> CPUCoresToNumber(),
        "instance_memory": S("status", "capacity", "memory") >> StringToUnitNumber("GiB"),
        "instance_type": K("kubernetes_node"),
        # node status is reported as a constant RUNNING
        "instance_status": K(InstanceStatus.RUNNING.value),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["kubernetes_csi_node", "kubernetes_pod"],
            "delete": [],
        }
    }
    provider_id: Optional[str] = None
    node_status: Optional[KubernetesNodeStatus] = field(default=None)
    node_spec: Optional[KubernetesNodeSpec] = field(default=None)
# region pod
@define(eq=False, slots=False)
class KubernetesPodStatusConditions:
    """A single condition entry of a pod status."""

    kind: ClassVar[str] = "kubernetes_pod_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_probe_time": S("lastProbeTime"),
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_probe_time: Optional[datetime] = field(default=None)
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesContainerStateRunning:
    """Running state of a container: only the start time."""

    kind: ClassVar[str] = "kubernetes_container_state_running"
    mapping: ClassVar[Dict[str, Bender]] = {
        "started_at": S("startedAt"),
    }
    started_at: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class KubernetesContainerStateTerminated:
    """Terminated state of a container: exit code, reason and timing."""

    kind: ClassVar[str] = "kubernetes_container_state_terminated"
    mapping: ClassVar[Dict[str, Bender]] = {
        "container_id": S("containerID"),
        "exit_code": S("exitCode"),
        "finished_at": S("finishedAt"),
        "message": S("message"),
        "reason": S("reason"),
        "signal": S("signal"),
        "started_at": S("startedAt"),
    }
    container_id: Optional[str] = field(default=None)
    exit_code: Optional[int] = field(default=None)
    finished_at: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    signal: Optional[int] = field(default=None)
    started_at: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class KubernetesContainerStateWaiting:
    """Waiting state of a container: reason and message."""

    kind: ClassVar[str] = "kubernetes_container_state_waiting"
    mapping: ClassVar[Dict[str, Bender]] = {
        "message": S("message"),
        "reason": S("reason"),
    }
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesContainerState:
    """Union of the three possible container states (running/terminated/waiting)."""

    kind: ClassVar[str] = "kubernetes_container_state"
    mapping: ClassVar[Dict[str, Bender]] = {
        "running": S("running") >> Bend(KubernetesContainerStateRunning.mapping),
        "terminated": S("terminated") >> Bend(KubernetesContainerStateTerminated.mapping),
        "waiting": S("waiting") >> Bend(KubernetesContainerStateWaiting.mapping),
    }
    running: Optional[KubernetesContainerStateRunning] = field(default=None)
    terminated: Optional[KubernetesContainerStateTerminated] = field(default=None)
    waiting: Optional[KubernetesContainerStateWaiting] = field(default=None)
@define(eq=False, slots=False)
class KubernetesContainerStatus:
    """Status of one container inside a pod (image, readiness, state)."""

    kind: ClassVar[str] = "kubernetes_container_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "container_id": S("containerID"),
        "image": S("image"),
        "image_id": S("imageID"),
        "last_state": S("lastState") >> Bend(KubernetesContainerState.mapping),
        "name": S("name"),
        "ready": S("ready"),
        "restart_count": S("restartCount"),
        "started": S("started"),
        "state": S("state") >> Bend(KubernetesContainerState.mapping),
    }
    container_id: Optional[str] = field(default=None)
    image: Optional[str] = field(default=None)
    image_id: Optional[str] = field(default=None)
    last_state: Optional[KubernetesContainerState] = field(default=None)
    name: Optional[str] = field(default=None)
    ready: Optional[bool] = field(default=None)
    restart_count: Optional[int] = field(default=None)
    started: Optional[bool] = field(default=None)
    state: Optional[KubernetesContainerState] = field(default=None)
@define(eq=False, slots=False)
class KubernetesPodIPs:
    """A single IP address assigned to a pod."""

    kind: ClassVar[str] = "kubernetes_pod_ips"
    mapping: ClassVar[Dict[str, Bender]] = {"ip": S("ip")}
    ip: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesPodStatus:
    """Status section of a pod, extracted from the raw API JSON."""

    kind: ClassVar[str] = "kubernetes_pod_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        # conditions are sorted by lastTransitionTime before being parsed
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesPodStatusConditions.mapping),
        "container_statuses": S("containerStatuses", default=[]) >> ForallBend(KubernetesContainerStatus.mapping),
        # NOTE(review): bent with KubernetesContainerState.mapping while the sibling
        # lists use KubernetesContainerStatus.mapping - confirm this is intended.
        "ephemeral_container_statuses": S("ephemeralContainerStatuses", default=[])
        >> ForallBend(KubernetesContainerState.mapping),
        "host_ip": S("hostIP"),
        "init_container_statuses": S("initContainerStatuses", default=[])
        >> ForallBend(KubernetesContainerStatus.mapping),
        "message": S("message"),
        "nominated_node_name": S("nominatedNodeName"),
        "phase": S("phase"),
        "pod_ip": S("podIP"),
        "pod_ips": S("podIPs", default=[]) >> ForallBend(KubernetesPodIPs.mapping),
        "qos_class": S("qosClass"),
        "reason": S("reason"),
        "start_time": S("startTime"),
    }
    conditions: List[KubernetesPodStatusConditions] = field(factory=list)
    container_statuses: List[KubernetesContainerStatus] = field(factory=list)
    ephemeral_container_statuses: List[KubernetesContainerState] = field(factory=list)
    host_ip: Optional[str] = field(default=None)
    init_container_statuses: List[KubernetesContainerStatus] = field(factory=list)
    message: Optional[str] = field(default=None)
    nominated_node_name: Optional[str] = field(default=None)
    phase: Optional[str] = field(default=None)
    pod_ip: Optional[str] = field(default=None)
    pod_ips: List[KubernetesPodIPs] = field(factory=list)
    qos_class: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    start_time: Optional[datetime] = field(default=None)
@define
class KubernetesContainerPort:
    """A port exposed by a container."""

    kind: ClassVar[str] = "kubernetes_container_port"
    mapping: ClassVar[Dict[str, Bender]] = {
        "container_port": S("containerPort"),
        "host_ip": S("hostIP"),
        "host_port": S("hostPort"),
        "name": S("name"),
        "protocol": S("protocol"),
    }
    container_port: Optional[int] = field(default=None)
    host_ip: Optional[str] = field(default=None)
    host_port: Optional[int] = field(default=None)
    name: Optional[str] = field(default=None)
    protocol: Optional[str] = field(default=None)
@define
class KubernetesResourceRequirements:
    """Resource limits and requests of a container (kept as raw JSON values)."""

    kind: ClassVar[str] = "kubernetes_resource_requirements"
    mapping: ClassVar[Dict[str, Bender]] = {
        "limits": S("limits"),
        "requests": S("requests"),
    }
    limits: Optional[Any] = field(default=None)
    requests: Optional[Any] = field(default=None)
@define
class KubernetesSecurityContext:
    """Security settings of a container (privileges, user/group, profiles)."""

    kind: ClassVar[str] = "kubernetes_security_context"
    mapping: ClassVar[Dict[str, Bender]] = {
        "allow_privilege_escalation": S("allowPrivilegeEscalation"),
        "privileged": S("privileged"),
        "proc_mount": S("procMount"),
        "read_only_root_filesystem": S("readOnlyRootFilesystem"),
        "run_as_group": S("runAsGroup"),
        "run_as_non_root": S("runAsNonRoot"),
        "run_as_user": S("runAsUser"),
        "se_linux_options": S("seLinuxOptions"),
        "seccomp_profile": S("seccompProfile"),
        "windows_options": S("windowsOptions"),
    }
    allow_privilege_escalation: Optional[bool] = field(default=None)
    privileged: Optional[bool] = field(default=None)
    proc_mount: Optional[str] = field(default=None)
    read_only_root_filesystem: Optional[bool] = field(default=None)
    run_as_group: Optional[int] = field(default=None)
    run_as_non_root: Optional[bool] = field(default=None)
    run_as_user: Optional[int] = field(default=None)
    se_linux_options: Optional[Any] = field(default=None)
    seccomp_profile: Optional[Any] = field(default=None)
    windows_options: Optional[Any] = field(default=None)
@define
class KubernetesVolumeDevice:
    """Mapping of a raw block device into a container (k8s API ``VolumeDevice``)."""
    kind: ClassVar[str] = "kubernetes_volume_device"
    mapping: ClassVar[Dict[str, Bender]] = {"device_path": S("devicePath"), "name": S("name")}
    device_path: Optional[str] = field(default=None)
    name: Optional[str] = field(default=None)
@define
class KubernetesVolumeMount:
    """Mounting of a named pod volume into a container (k8s API ``VolumeMount``)."""
    kind: ClassVar[str] = "kubernetes_volume_mount"
    mapping: ClassVar[Dict[str, Bender]] = {
        "mount_path": S("mountPath"),
        "mount_propagation": S("mountPropagation"),
        "name": S("name"),
        "read_only": S("readOnly"),
        "sub_path": S("subPath"),
        "sub_path_expr": S("subPathExpr"),
    }
    mount_path: Optional[str] = field(default=None)
    mount_propagation: Optional[str] = field(default=None)
    name: Optional[str] = field(default=None)
    read_only: Optional[bool] = field(default=None)
    sub_path: Optional[str] = field(default=None)
    sub_path_expr: Optional[str] = field(default=None)
@define
class KubernetesContainer:
    """A single container within a pod (k8s API ``Container``)."""
    kind: ClassVar[str] = "kubernetes_container"
    mapping: ClassVar[Dict[str, Bender]] = {
        "args": S("args", default=[]),
        "command": S("command", default=[]),
        "image": S("image"),
        "image_pull_policy": S("imagePullPolicy"),
        "name": S("name"),
        # nested structures are bent with the mappings of their model classes
        "ports": S("ports", default=[]) >> ForallBend(KubernetesContainerPort.mapping),
        "resources": S("resources") >> Bend(KubernetesResourceRequirements.mapping),
        "security_context": S("securityContext") >> Bend(KubernetesSecurityContext.mapping),
        "stdin": S("stdin"),
        "stdin_once": S("stdinOnce"),
        "termination_message_path": S("terminationMessagePath"),
        "termination_message_policy": S("terminationMessagePolicy"),
        "tty": S("tty"),
        "volume_devices": S("volumeDevices", default=[]) >> ForallBend(KubernetesVolumeDevice.mapping),
        "volume_mounts": S("volumeMounts", default=[]) >> ForallBend(KubernetesVolumeMount.mapping),
        "working_dir": S("workingDir"),
    }
    args: List[str] = field(factory=list)
    command: List[str] = field(factory=list)
    image: Optional[str] = field(default=None)
    image_pull_policy: Optional[str] = field(default=None)
    name: Optional[str] = field(default=None)
    ports: List[KubernetesContainerPort] = field(factory=list)
    resources: Optional[KubernetesResourceRequirements] = field(default=None)
    security_context: Optional[KubernetesSecurityContext] = field(default=None)
    stdin: Optional[bool] = field(default=None)
    stdin_once: Optional[bool] = field(default=None)
    termination_message_path: Optional[str] = field(default=None)
    termination_message_policy: Optional[str] = field(default=None)
    tty: Optional[bool] = field(default=None)
    volume_devices: List[KubernetesVolumeDevice] = field(factory=list)
    volume_mounts: List[KubernetesVolumeMount] = field(factory=list)
    working_dir: Optional[str] = field(default=None)
@define
class KubernetesPodSecurityContext:
    """Pod-level security settings applied to all containers (k8s API ``PodSecurityContext``)."""
    kind: ClassVar[str] = "kubernetes_pod_security_context"
    mapping: ClassVar[Dict[str, Bender]] = {
        "fs_group": S("fsGroup"),
        "fs_group_change_policy": S("fsGroupChangePolicy"),
        "run_as_group": S("runAsGroup"),
        "run_as_non_root": S("runAsNonRoot"),
        "run_as_user": S("runAsUser"),
        "se_linux_options": S("seLinuxOptions"),
        "seccomp_profile": S("seccompProfile"),
        "supplemental_groups": S("supplementalGroups", default=[]),
        "windows_options": S("windowsOptions"),
    }
    fs_group: Optional[int] = field(default=None)
    fs_group_change_policy: Optional[str] = field(default=None)
    run_as_group: Optional[int] = field(default=None)
    run_as_non_root: Optional[bool] = field(default=None)
    run_as_user: Optional[int] = field(default=None)
    # nested option objects are stored opaquely rather than modeled in detail
    se_linux_options: Optional[Any] = field(default=None)
    seccomp_profile: Optional[Any] = field(default=None)
    supplemental_groups: List[int] = field(factory=list)
    windows_options: Optional[Any] = field(default=None)
@define
class KubernetesToleration:
    """A pod's tolerance of a node taint (k8s API ``Toleration``)."""
    kind: ClassVar[str] = "kubernetes_toleration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "effect": S("effect"),
        "key": S("key"),
        "operator": S("operator"),
        "toleration_seconds": S("tolerationSeconds"),
        "value": S("value"),
    }
    effect: Optional[str] = field(default=None)
    key: Optional[str] = field(default=None)
    operator: Optional[str] = field(default=None)
    toleration_seconds: Optional[int] = field(default=None)
    value: Optional[str] = field(default=None)
@define
class KubernetesVolume:
    """A named volume available to a pod (k8s API ``Volume``).

    All of the possible volume source fields are carried as opaque ``Any``
    payloads; only the volume ``name`` is modeled explicitly.
    """
    kind: ClassVar[str] = "kubernetes_volume"
    mapping: ClassVar[Dict[str, Bender]] = {
        "aws_elastic_block_store": S("awsElasticBlockStore"),
        "azure_disk": S("azureDisk"),
        "azure_file": S("azureFile"),
        "cephfs": S("cephfs"),
        "cinder": S("cinder"),
        "config_map": S("configMap"),
        "csi": S("csi"),
        "downward_api": S("downwardAPI"),
        "empty_dir": S("emptyDir"),
        "ephemeral": S("ephemeral"),
        "fc": S("fc"),
        "flex_volume": S("flexVolume"),
        "flocker": S("flocker"),
        "gce_persistent_disk": S("gcePersistentDisk"),
        "git_repo": S("gitRepo"),
        "glusterfs": S("glusterfs"),
        "host_path": S("hostPath"),
        "iscsi": S("iscsi"),
        "name": S("name"),
        "nfs": S("nfs"),
        "persistent_volume_claim": S("persistentVolumeClaim"),
        "photon_persistent_disk": S("photonPersistentDisk"),
        "portworx_volume": S("portworxVolume"),
        "projected": S("projected"),
        "quobyte": S("quobyte"),
        "rbd": S("rbd"),
        "scale_io": S("scaleIO"),
        "secret": S("secret"),
        "storageos": S("storageos"),
        "vsphere_volume": S("vsphereVolume"),
    }
    aws_elastic_block_store: Optional[Any] = field(default=None)
    azure_disk: Optional[Any] = field(default=None)
    azure_file: Optional[Any] = field(default=None)
    cephfs: Optional[Any] = field(default=None)
    cinder: Optional[Any] = field(default=None)
    config_map: Optional[Any] = field(default=None)
    csi: Optional[Any] = field(default=None)
    downward_api: Optional[Any] = field(default=None)
    empty_dir: Optional[Any] = field(default=None)
    ephemeral: Optional[Any] = field(default=None)
    fc: Optional[Any] = field(default=None)
    flex_volume: Optional[Any] = field(default=None)
    flocker: Optional[Any] = field(default=None)
    gce_persistent_disk: Optional[Any] = field(default=None)
    git_repo: Optional[Any] = field(default=None)
    glusterfs: Optional[Any] = field(default=None)
    host_path: Optional[Any] = field(default=None)
    iscsi: Optional[Any] = field(default=None)
    name: Optional[str] = field(default=None)
    nfs: Optional[Any] = field(default=None)
    persistent_volume_claim: Optional[Any] = field(default=None)
    photon_persistent_disk: Optional[Any] = field(default=None)
    portworx_volume: Optional[Any] = field(default=None)
    projected: Optional[Any] = field(default=None)
    quobyte: Optional[Any] = field(default=None)
    rbd: Optional[Any] = field(default=None)
    scale_io: Optional[Any] = field(default=None)
    secret: Optional[Any] = field(default=None)
    storageos: Optional[Any] = field(default=None)
    vsphere_volume: Optional[Any] = field(default=None)
@define
class KubernetesPodSpec:
    """Desired behavior of a pod (k8s API ``PodSpec``)."""
    kind: ClassVar[str] = "kubernetes_pod_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "active_deadline_seconds": S("activeDeadlineSeconds"),
        "automount_service_account_token": S("automountServiceAccountToken"),
        "containers": S("containers", default=[]) >> ForallBend(KubernetesContainer.mapping),
        "dns_policy": S("dnsPolicy"),
        "enable_service_links": S("enableServiceLinks"),
        "ephemeral_containers": S("ephemeralContainers", default=[]) >> ForallBend(KubernetesContainer.mapping),
        "host_ipc": S("hostIPC"),
        "host_network": S("hostNetwork"),
        "host_pid": S("hostPID"),
        "hostname": S("hostname"),
        "init_containers": S("initContainers", default=[]) >> ForallBend(KubernetesContainer.mapping),
        "node_name": S("nodeName"),
        "overhead": S("overhead"),
        "preemption_policy": S("preemptionPolicy"),
        "priority": S("priority"),
        "priority_class_name": S("priorityClassName"),
        "restart_policy": S("restartPolicy"),
        "runtime_class_name": S("runtimeClassName"),
        "scheduler_name": S("schedulerName"),
        # fix: a PodSpec carries a pod-level security context; the field below is typed
        # KubernetesPodSecurityContext, so bend with that mapping, not the container-level one
        "security_context": S("securityContext") >> Bend(KubernetesPodSecurityContext.mapping),
        "service_account": S("serviceAccount"),
        "service_account_name": S("serviceAccountName"),
        "set_hostname_as_fqdn": S("setHostnameAsFQDN"),
        "share_process_namespace": S("shareProcessNamespace"),
        "subdomain": S("subdomain"),
        "termination_grace_period_seconds": S("terminationGracePeriodSeconds"),
        "tolerations": S("tolerations", default=[]) >> ForallBend(KubernetesToleration.mapping),
        "volumes": S("volumes", default=[]) >> ForallBend(KubernetesVolume.mapping),
    }
    active_deadline_seconds: Optional[int] = field(default=None)
    automount_service_account_token: Optional[bool] = field(default=None)
    containers: List[KubernetesContainer] = field(factory=list)
    dns_policy: Optional[str] = field(default=None)
    enable_service_links: Optional[bool] = field(default=None)
    ephemeral_containers: List[KubernetesContainer] = field(factory=list)
    host_ipc: Optional[bool] = field(default=None)
    host_network: Optional[bool] = field(default=None)
    host_pid: Optional[bool] = field(default=None)
    hostname: Optional[str] = field(default=None)
    init_containers: List[KubernetesContainer] = field(factory=list)
    node_name: Optional[str] = field(default=None)
    preemption_policy: Optional[str] = field(default=None)
    priority: Optional[int] = field(default=None)
    priority_class_name: Optional[str] = field(default=None)
    restart_policy: Optional[str] = field(default=None)
    runtime_class_name: Optional[str] = field(default=None)
    scheduler_name: Optional[str] = field(default=None)
    security_context: Optional[KubernetesPodSecurityContext] = field(default=None)
    service_account: Optional[str] = field(default=None)
    service_account_name: Optional[str] = field(default=None)
    set_hostname_as_fqdn: Optional[bool] = field(default=None)
    share_process_namespace: Optional[bool] = field(default=None)
    subdomain: Optional[str] = field(default=None)
    termination_grace_period_seconds: Optional[int] = field(default=None)
    tolerations: List[KubernetesToleration] = field(factory=list)
    volumes: List[KubernetesVolume] = field(factory=list)
    # fix: "overhead" was declared in `mapping` but had no matching attribute;
    # declared last to keep the generated __init__ backward compatible.
    # Kept as Any: overhead is a free-form resource-name -> quantity map in the k8s API.
    overhead: Optional[Any] = field(default=None)
@define(eq=False, slots=False)
class KubernetesPod(KubernetesResource):
    """A Kubernetes pod, connected to its node, volumes, secrets and config maps."""
    kind: ClassVar[str] = "kubernetes_pod"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "pod_status": S("status") >> Bend(KubernetesPodStatus.mapping),
        "pod_spec": S("spec") >> Bend(KubernetesPodSpec.mapping),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["kubernetes_secret", "kubernetes_persistent_volume_claim", "kubernetes_config_map"],
            "delete": ["kubernetes_stateful_set", "kubernetes_replica_set", "kubernetes_job", "kubernetes_daemon_set"],
        }
    }
    pod_status: Optional[KubernetesPodStatus] = field(default=None)
    pod_spec: Optional[KubernetesPodSpec] = field(default=None)
    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Create graph edges from this pod to resources referenced in the raw ``source`` json."""
        super().connect_in_graph(builder, source)
        # volumes can reference secrets, PVCs and config maps; the builder resolves those
        volumes = bend(S("spec", "volumes", default=[]), source)
        builder.connect_volumes(self, volumes)
        if node_name := bend(S("spec", "nodeName"), source):
            # NOTE(review): the positional True presumably reverses the edge direction
            # (node -> pod) — confirm against GraphBuilder.add_edge's signature
            builder.add_edge(self, EdgeType.default, True, clazz=KubernetesNode, name=node_name)
        # collect env[*].valueFrom for every container: a list (per container) of lists (per env var)
        container_array = bend(
            S("spec", "containers") >> ForallBend(S("env", default=[]) >> ForallBend(S("valueFrom"))), source
        )
        for from_array in container_array:
            for value_from in from_array:
                if value_from is None:
                    # env var has a literal value, not a reference
                    continue
                elif ref := value_from.get("secretKeyRef", None):
                    builder.add_edge(self, EdgeType.default, clazz=KubernetesSecret, name=ref["name"])
                elif ref := value_from.get("configMapKeyRef", None):
                    builder.add_edge(self, EdgeType.default, clazz=KubernetesConfigMap, name=ref["name"])
# endregion
# region persistent volume claim
@define(eq=False, slots=False)
class KubernetesPersistentVolumeClaimStatusConditions:
    """One condition entry of a PVC status (k8s API ``PersistentVolumeClaimCondition``)."""
    kind: ClassVar[str] = "kubernetes_persistent_volume_claim_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_probe_time": S("lastProbeTime"),
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_probe_time: Optional[datetime] = field(default=None)
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesPersistentVolumeClaimStatus:
    """Observed state of a PVC (k8s API ``PersistentVolumeClaimStatus``)."""
    kind: ClassVar[str] = "kubernetes_persistent_volume_claim_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "access_modes": S("accessModes", default=[]),
        # NOTE(review): in the k8s API allocatedResources is a resource->quantity map,
        # but the field below is typed Optional[str] — confirm intended representation
        "allocated_resources": S("allocatedResources"),
        # conditions are ordered by transition time before being bent into model objects
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesPersistentVolumeClaimStatusConditions.mapping),
        "phase": S("phase"),
        "resize_status": S("resizeStatus"),
    }
    access_modes: List[str] = field(factory=list)
    allocated_resources: Optional[str] = field(default=None)
    conditions: List[KubernetesPersistentVolumeClaimStatusConditions] = field(factory=list)
    phase: Optional[str] = field(default=None)
    resize_status: Optional[str] = field(default=None)
@define
class KubernetesLabelSelectorRequirement:
    """One expression of a label selector: a key, an operator and the operand values."""
    kind: ClassVar[str] = "kubernetes_label_selector_requirement"
    mapping: ClassVar[Dict[str, Bender]] = {"key": S("key"), "operator": S("operator"), "values": S("values", default=[])}
    key: Optional[str] = field(default=None)
    operator: Optional[str] = field(default=None)
    values: List[str] = field(factory=list)
@define
class KubernetesLabelSelector:
    """A label query over resources: expressions and/or exact label matches (k8s API ``LabelSelector``)."""
    kind: ClassVar[str] = "kubernetes_label_selector"
    mapping: ClassVar[Dict[str, Bender]] = {
        "match_expressions": S("matchExpressions", default=[])
        >> ForallBend(KubernetesLabelSelectorRequirement.mapping),
        "match_labels": S("matchLabels"),
    }
    match_expressions: List[KubernetesLabelSelectorRequirement] = field(factory=list)
    match_labels: Optional[Dict[str, str]] = field(default=None)
@define
class KubernetesPersistentVolumeClaimSpec:
    """Desired characteristics of a PVC (k8s API ``PersistentVolumeClaimSpec``)."""
    kind: ClassVar[str] = "kubernetes_persistent_volume_claim_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "access_modes": S("accessModes", default=[]),
        "resources": S("resources") >> Bend(KubernetesResourceRequirements.mapping),
        "selector": S("selector") >> Bend(KubernetesLabelSelector.mapping),
        "storage_class_name": S("storageClassName"),
        "volume_mode": S("volumeMode"),
        "volume_name": S("volumeName"),
    }
    access_modes: List[str] = field(factory=list)
    resources: Optional[KubernetesResourceRequirements] = field(default=None)
    selector: Optional[KubernetesLabelSelector] = field(default=None)
    storage_class_name: Optional[str] = field(default=None)
    volume_mode: Optional[str] = field(default=None)
    volume_name: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesPersistentVolumeClaim(KubernetesResource):
    """A Kubernetes persistent volume claim; its bound persistent volume is a successor."""
    kind: ClassVar[str] = "kubernetes_persistent_volume_claim"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "persistent_volume_claim_status": S("status") >> Bend(KubernetesPersistentVolumeClaimStatus.mapping),
        "persistent_volume_claim_spec": S("spec") >> Bend(KubernetesPersistentVolumeClaimSpec.mapping),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {"default": ["kubernetes_persistent_volume"], "delete": []}
    }
    persistent_volume_claim_status: Optional[KubernetesPersistentVolumeClaimStatus] = field(default=None)
    persistent_volume_claim_spec: Optional[KubernetesPersistentVolumeClaimSpec] = field(default=None)
# endregion
# region service
@define(eq=False, slots=False)
class KubernetesLoadbalancerIngressPorts:
    """Status of one service port on a load balancer ingress point."""
    kind: ClassVar[str] = "kubernetes_loadbalancer_ingress_ports"
    mapping: ClassVar[Dict[str, Bender]] = {"error": S("error"), "port": S("port"), "protocol": S("protocol")}
    error: Optional[str] = field(default=None)
    port: Optional[int] = field(default=None)
    protocol: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesLoadbalancerIngress:
    """One ingress point (hostname or IP) of a load balancer, with its ports."""
    kind: ClassVar[str] = "kubernetes_loadbalancer_ingress"
    mapping: ClassVar[Dict[str, Bender]] = {
        "hostname": S("hostname"),
        "ip": S("ip"),
        "ports": S("ports", default=[]) >> ForallBend(KubernetesLoadbalancerIngressPorts.mapping),
    }
    hostname: Optional[str] = field(default=None)
    ip: Optional[str] = field(default=None)
    ports: List[KubernetesLoadbalancerIngressPorts] = field(factory=list)
@define(eq=False, slots=False)
class KubernetesLoadbalancerStatus:
    """Load balancer status: the ingress points currently serving traffic."""
    kind: ClassVar[str] = "kubernetes_loadbalancer_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "ingress": S("ingress", default=[]) >> ForallBend(KubernetesLoadbalancerIngress.mapping)
    }
    ingress: List[KubernetesLoadbalancerIngress] = field(factory=list)
@define(eq=False, slots=False)
class KubernetesServiceStatusConditions:
    """One condition entry of a service status (k8s API ``Condition``)."""
    kind: ClassVar[str] = "kubernetes_service_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "observed_generation": S("observedGeneration"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    observed_generation: Optional[int] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesServiceStatus:
    """Observed state of a service (k8s API ``ServiceStatus``)."""
    kind: ClassVar[str] = "kubernetes_service_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        # conditions are ordered by transition time before being bent into model objects
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesServiceStatusConditions.mapping),
        "load_balancer": S("loadBalancer") >> Bend(KubernetesLoadbalancerStatus.mapping),
    }
    conditions: List[KubernetesServiceStatusConditions] = field(factory=list)
    load_balancer: Optional[KubernetesLoadbalancerStatus] = field(default=None)
@define
class KubernetesServicePort:
    """One port exposed by a service (k8s API ``ServicePort``)."""
    kind: ClassVar[str] = "kubernetes_service_port"
    mapping: ClassVar[Dict[str, Bender]] = {
        "app_protocol": S("appProtocol"),
        "name": S("name"),
        "node_port": S("nodePort"),
        "port": S("port"),
        "protocol": S("protocol"),
        "target_port": S("targetPort"),
    }
    app_protocol: Optional[str] = field(default=None)
    name: Optional[str] = field(default=None)
    node_port: Optional[int] = field(default=None)
    port: Optional[int] = field(default=None)
    protocol: Optional[str] = field(default=None)
    # the API allows either a numeric port or a named container port here
    target_port: Optional[Union[str, int]] = field(default=None)
@define
class KubernetesServiceSpec:
    """Desired behavior of a service (k8s API ``ServiceSpec``)."""
    kind: ClassVar[str] = "kubernetes_service_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "allocate_load_balancer_node_ports": S("allocateLoadBalancerNodePorts"),
        "cluster_ip": S("clusterIP"),
        "cluster_ips": S("clusterIPs", default=[]),
        "external_ips": S("externalIPs", default=[]),
        "external_name": S("externalName"),
        "external_traffic_policy": S("externalTrafficPolicy"),
        "health_check_node_port": S("healthCheckNodePort"),
        "internal_traffic_policy": S("internalTrafficPolicy"),
        "ip_families": S("ipFamilies", default=[]),
        "ip_family_policy": S("ipFamilyPolicy"),
        "load_balancer_class": S("loadBalancerClass"),
        "load_balancer_ip": S("loadBalancerIP"),
        "load_balancer_source_ranges": S("loadBalancerSourceRanges", default=[]),
        "ports": S("ports", default=[]) >> ForallBend(KubernetesServicePort.mapping),
        "publish_not_ready_addresses": S("publishNotReadyAddresses"),
        "session_affinity": S("sessionAffinity"),
        "type": S("type"),
        "selector": S("selector", default={}),
    }
    allocate_load_balancer_node_ports: Optional[bool] = field(default=None)
    cluster_ip: Optional[str] = field(default=None)
    cluster_ips: List[str] = field(factory=list)
    external_ips: List[str] = field(factory=list)
    external_name: Optional[str] = field(default=None)
    external_traffic_policy: Optional[str] = field(default=None)
    health_check_node_port: Optional[int] = field(default=None)
    internal_traffic_policy: Optional[str] = field(default=None)
    ip_families: List[str] = field(factory=list)
    ip_family_policy: Optional[str] = field(default=None)
    load_balancer_class: Optional[str] = field(default=None)
    load_balancer_ip: Optional[str] = field(default=None)
    load_balancer_source_ranges: List[str] = field(factory=list)
    ports: List[KubernetesServicePort] = field(factory=list)
    publish_not_ready_addresses: Optional[bool] = field(default=None)
    session_affinity: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
    # label selector mapping pods into this service
    selector: Optional[Dict[str, str]] = field(default=None)
@define(eq=False, slots=False)
class KubernetesService(KubernetesResource, BaseLoadBalancer):
    """A Kubernetes service, modeled as a load balancer over the pods it selects."""
    kind: ClassVar[str] = "kubernetes_service"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "service_status": S("status") >> Bend(KubernetesServiceStatus.mapping),
        "service_spec": S("spec") >> Bend(KubernetesServiceSpec.mapping),
        "public_ip_address": S("spec", "externalIPs", 0),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["kubernetes_pod", "kubernetes_endpoint_slice"],
            "delete": [],
        }
    }
    service_status: Optional[KubernetesServiceStatus] = field(default=None)
    service_spec: Optional[KubernetesServiceSpec] = field(default=None)
    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect this service to the pods matched by its label selector and
        record the matched pods' names (or ids) as load balancer backends."""
        super().connect_in_graph(builder, source)
        resolved_backends = set()
        selector = bend(S("spec", "selector"), source)
        if selector:
            builder.add_edges_from_selector(self, EdgeType.default, selector, KubernetesPod)
            # perf fix: the (label key, label value) -> pods index over all graph nodes
            # was previously built even for services without a selector; build it lazily here
            pods_by_labels = defaultdict(list)
            for pod in builder.graph.nodes:
                if isinstance(pod, KubernetesPod):
                    for label in pod.labels.items():
                        pods_by_labels[label].append(pod)
            for key, value in selector.items():
                for pod in pods_by_labels.get((key, value), []):
                    resolved_backends.add(pod.name or pod.id)
        # a service without a selector ends up with an empty backend list, as before
        self.backends = list(resolved_backends)
# endregion
@define(eq=False, slots=False)
class KubernetesPodTemplate(KubernetesResource):
    """A Kubernetes pod template; only the base resource fields are modeled."""
    kind: ClassVar[str] = "kubernetes_pod_template"
@define(eq=False, slots=False)
class KubernetesClusterInfo:
    """Version and endpoint information of a cluster; all fields are mandatory (no defaults)."""
    kind: ClassVar[str] = "kubernetes_cluster_info"
    major: str
    minor: str
    platform: str
    server_url: str
@define(eq=False, slots=False)
class KubernetesCluster(KubernetesResource, BaseAccount):
    """A Kubernetes cluster; acts as the account root for all cluster-scoped resources."""
    kind: ClassVar[str] = "kubernetes_cluster"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [
                "kubernetes_volume_attachment",
                "kubernetes_validating_webhook_configuration",
                "kubernetes_storage_class",
                "kubernetes_priority_level_configuration",
                "kubernetes_priority_class",
                "kubernetes_persistent_volume",
                "kubernetes_node",
                "kubernetes_namespace",
                "kubernetes_mutating_webhook_configuration",
                "kubernetes_flow_schema",
                "kubernetes_csi_node",
                "kubernetes_csi_driver",
                "kubernetes_cluster_role_binding",
                "kubernetes_cluster_role",
                "kubernetes_ingress_class",
            ],
            "delete": [],
        }
    }
    cluster_info: Optional[KubernetesClusterInfo] = None
@define(eq=False, slots=False)
class KubernetesConfigMap(KubernetesResource):
    """A Kubernetes config map; only the base resource fields are modeled."""
    kind: ClassVar[str] = "kubernetes_config_map"
@define(eq=False, slots=False)
class KubernetesEndpointAddress:
    """A single endpoint address and the uid of the object it points at."""
    kind: ClassVar[str] = "kubernetes_endpoint_address"
    mapping: ClassVar[Dict[str, Bender]] = {
        "ip": S("ip"),
        "node_name": S("nodeName"),
        # only the uid of the target object is extracted from targetRef
        "_target_ref": S("targetRef", "uid"),
    }
    ip: Optional[str] = field(default=None)
    node_name: Optional[str] = field(default=None)
    _target_ref: Optional[str] = field(default=None)
    def target_ref(self) -> Optional[str]:
        """Return the uid of the referenced object (e.g. a pod or node), if any."""
        return self._target_ref
@define(eq=False, slots=False)
class KubernetesEndpointPort:
    """A named port exposed by an endpoint subset."""
    kind: ClassVar[str] = "kubernetes_endpoint_port"
    mapping: ClassVar[Dict[str, Bender]] = {"name": S("name"), "port": S("port"), "protocol": S("protocol")}
    name: Optional[str] = field(default=None)
    port: Optional[int] = field(default=None)
    protocol: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesEndpointSubset:
    """A group of endpoint addresses with the ports they serve."""
    kind: ClassVar[str] = "kubernetes_endpoint_subset"
    mapping: ClassVar[Dict[str, Bender]] = {
        "addresses": S("addresses", default=[]) >> ForallBend(KubernetesEndpointAddress.mapping),
        "ports": S("ports", default=[]) >> ForallBend(KubernetesEndpointPort.mapping),
    }
    addresses: List[KubernetesEndpointAddress] = field(factory=list)
    ports: List[KubernetesEndpointPort] = field(factory=list)
@define(eq=False, slots=False)
class KubernetesEndpoints(KubernetesResource):
    """A Kubernetes endpoints object, linked to the pods/nodes its addresses target."""
    kind: ClassVar[str] = "kubernetes_endpoint"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "subsets": S("subsets", default=[]) >> ForallBend(KubernetesEndpointSubset.mapping),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["kubernetes_pod", "kubernetes_node", "kubernetes_endpoint_slice"],
            "delete": [],
        }
    }
    subsets: List[KubernetesEndpointSubset] = field(factory=list)
    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Add an edge to every object referenced by an endpoint address."""
        super().connect_in_graph(builder, source)
        for subset in self.subsets:
            for address in subset.addresses:
                if target := address.target_ref():
                    builder.add_edge(self, EdgeType.default, id=target)
@define(eq=False, slots=False)
class KubernetesEndpointSlice(KubernetesResource):
    """A Kubernetes endpoint slice; only the base resource fields are modeled."""
    kind: ClassVar[str] = "kubernetes_endpoint_slice"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [],
            "delete": ["kubernetes_service", "kubernetes_endpoint"],
        }
    }
@define(eq=False, slots=False)
class KubernetesLimitRange(KubernetesResource):
    """A Kubernetes limit range; only the base resource fields are modeled."""
    kind: ClassVar[str] = "kubernetes_limit_range"
@define(eq=False, slots=False)
class KubernetesNamespaceStatusConditions:
    """One condition entry of a namespace status (k8s API ``NamespaceCondition``)."""
    kind: ClassVar[str] = "kubernetes_namespace_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesNamespaceStatus:
    """Observed state of a namespace (k8s API ``NamespaceStatus``)."""
    kind: ClassVar[str] = "kubernetes_namespace_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        # conditions are ordered by transition time before being bent into model objects
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesNamespaceStatusConditions.mapping),
        "phase": S("phase"),
    }
    conditions: List[KubernetesNamespaceStatusConditions] = field(factory=list)
    phase: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesNamespace(KubernetesResource, BaseRegion):
    """A Kubernetes namespace; modeled as a region containing namespaced resources."""
    kind: ClassVar[str] = "kubernetes_namespace"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "namespace_status": S("status") >> Bend(KubernetesNamespaceStatus.mapping),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [
                "kubernetes_stateful_set",
                "kubernetes_service",
                "kubernetes_secret",
                "kubernetes_role_binding",
                "kubernetes_role",
                "kubernetes_replica_set",
                "kubernetes_pod_disruption_budget",
                "kubernetes_pod",
                "kubernetes_job",
                "kubernetes_endpoint_slice",
                "kubernetes_service_account",
                "kubernetes_endpoint",
                "kubernetes_deployment",
                "kubernetes_persistent_volume_claim",
                "kubernetes_daemon_set",
                "kubernetes_cron_job",
                "kubernetes_controller_revision",
                "kubernetes_config_map",
            ],
            "delete": [],
        }
    }
    namespace_status: Optional[KubernetesNamespaceStatus] = field(default=None)
@define(eq=False, slots=False)
class KubernetesPersistentVolumeStatus:
    """Observed state of a persistent volume (k8s API ``PersistentVolumeStatus``)."""
    kind: ClassVar[str] = "kubernetes_persistent_volume_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "message": S("message"),
        "phase": S("phase"),
        "reason": S("reason"),
    }
    message: Optional[str] = field(default=None)
    phase: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesPersistentVolumeSpecAwsElasticBlockStore:
    """AWS EBS volume source of a persistent volume: the EBS volume id and filesystem type."""
    kind: ClassVar[str] = "kubernetes_persistent_volume_spec_aws_elastic_block_store"
    mapping: ClassVar[Dict[str, Bender]] = {"volume_id": S("volumeID"), "fs_type": S("fsType")}
    volume_id: Optional[str] = field(default=None)
    fs_type: Optional[str] = field(default=None)
@define
class KubernetesPersistentVolumeSpec:
    """Desired characteristics of a persistent volume (k8s API ``PersistentVolumeSpec``).

    Only the AWS EBS volume source is modeled in detail; all other source
    fields are carried through without further structure.
    """
    kind: ClassVar[str] = "kubernetes_persistent_volume_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "access_modes": S("accessModes", default=[]),
        "aws_elastic_block_store": S("awsElasticBlockStore")
        >> Bend(KubernetesPersistentVolumeSpecAwsElasticBlockStore.mapping),
        "azure_disk": S("azureDisk"),
        "azure_file": S("azureFile"),
        "capacity": S("capacity"),
        "cephfs": S("cephfs"),
        "cinder": S("cinder"),
        "claim_ref": S("claimRef"),
        "csi": S("csi"),
        "fc": S("fc"),
        "flex_volume": S("flexVolume"),
        "flocker": S("flocker"),
        "gce_persistent_disk": S("gcePersistentDisk"),
        "glusterfs": S("glusterfs"),
        "host_path": S("hostPath"),
        "iscsi": S("iscsi"),
        "local": S("local"),
        "mount_options": S("mountOptions", default=[]),
        "nfs": S("nfs"),
        "node_affinity": S("nodeAffinity"),
        "persistent_volume_reclaim_policy": S("persistentVolumeReclaimPolicy"),
        "photon_persistent_disk": S("photonPersistentDisk"),
        "portworx_volume": S("portworxVolume"),
        "quobyte": S("quobyte"),
        "rbd": S("rbd"),
        "scale_io": S("scaleIO"),
        "storage_class_name": S("storageClassName"),
        "storageos": S("storageos"),
        "volume_mode": S("volumeMode"),
        "vsphere_volume": S("vsphereVolume"),
    }
    access_modes: List[str] = field(factory=list)
    aws_elastic_block_store: Optional[KubernetesPersistentVolumeSpecAwsElasticBlockStore] = field(default=None)
    # NOTE(review): several of these source fields are objects in the k8s API but are
    # typed Optional[str] here — values are passed through as-is; confirm intended types
    azure_disk: Optional[str] = field(default=None)
    azure_file: Optional[str] = field(default=None)
    capacity: Optional[Json] = field(default=None)
    cephfs: Optional[str] = field(default=None)
    cinder: Optional[str] = field(default=None)
    claim_ref: Optional[Json] = field(default=None)
    csi: Optional[Any] = field(default=None)
    fc: Optional[str] = field(default=None)
    flex_volume: Optional[str] = field(default=None)
    flocker: Optional[str] = field(default=None)
    gce_persistent_disk: Optional[str] = field(default=None)
    glusterfs: Optional[str] = field(default=None)
    host_path: Optional[str] = field(default=None)
    iscsi: Optional[str] = field(default=None)
    local: Optional[str] = field(default=None)
    mount_options: List[str] = field(factory=list)
    nfs: Optional[str] = field(default=None)
    node_affinity: Optional[str] = field(default=None)
    persistent_volume_reclaim_policy: Optional[str] = field(default=None)
    photon_persistent_disk: Optional[str] = field(default=None)
    portworx_volume: Optional[str] = field(default=None)
    quobyte: Optional[str] = field(default=None)
    rbd: Optional[str] = field(default=None)
    scale_io: Optional[str] = field(default=None)
    storage_class_name: Optional[str] = field(default=None)
    storageos: Optional[str] = field(default=None)
    volume_mode: Optional[str] = field(default=None)
    vsphere_volume: Optional[str] = field(default=None)
# Translate the k8s PersistentVolume `status.phase` values into the VolumeStatus enum.
# Phases not listed here fall back to VolumeStatus.UNKNOWN (see KubernetesPersistentVolume.mapping).
VolumeStatusMapping = {
    "Available": VolumeStatus.AVAILABLE,
    "Bound": VolumeStatus.IN_USE,
    "Released": VolumeStatus.BUSY,
    "Failed": VolumeStatus.ERROR,
}
@define(eq=False, slots=False)
class KubernetesPersistentVolume(KubernetesResource, BaseVolume):
    """A Kubernetes persistent volume, mapped onto the generic volume base resource."""
    kind: ClassVar[str] = "kubernetes_persistent_volume"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "persistent_volume_status": S("status") >> Bend(KubernetesPersistentVolumeStatus.mapping),
        "persistent_volume_spec": S("spec") >> Bend(KubernetesPersistentVolumeSpec.mapping),
        # quantity strings like "10Gi" are converted to a number of GB
        "volume_size": S("spec", "capacity", "storage", default="0") >> StringToUnitNumber("GB"),
        "volume_type": S("spec", "storageClassName"),
        "volume_status": S("status", "phase") >> MapEnum(VolumeStatusMapping, VolumeStatus.UNKNOWN),
    }
    persistent_volume_status: Optional[KubernetesPersistentVolumeStatus] = field(default=None)
    persistent_volume_spec: Optional[KubernetesPersistentVolumeSpec] = field(default=None)
    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Link the claiming PVC (by uid) to this volume; the edge is reversed so it runs PVC -> PV."""
        super().connect_in_graph(builder, source)
        claim_ref = bend(S("spec", "claimRef", "uid"), source)
        if claim_ref:
            builder.add_edge(self, EdgeType.default, id=claim_ref, reverse=True)
@define(eq=False, slots=False)
class KubernetesReplicationControllerStatusConditions:
    """One condition entry of a replication controller status."""
    kind: ClassVar[str] = "kubernetes_replication_controller_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesReplicationControllerStatus:
    """Observed state of a replication controller (k8s API ``ReplicationControllerStatus``)."""
    kind: ClassVar[str] = "kubernetes_replication_controller_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "available_replicas": S("availableReplicas"),
        # conditions are ordered by transition time before being bent into model objects
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesReplicationControllerStatusConditions.mapping),
        "fully_labeled_replicas": S("fullyLabeledReplicas"),
        "observed_generation": S("observedGeneration"),
        "ready_replicas": S("readyReplicas"),
        "replicas": S("replicas"),
    }
    available_replicas: Optional[int] = field(default=None)
    conditions: List[KubernetesReplicationControllerStatusConditions] = field(factory=list)
    fully_labeled_replicas: Optional[int] = field(default=None)
    observed_generation: Optional[int] = field(default=None)
    ready_replicas: Optional[int] = field(default=None)
    replicas: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class KubernetesReplicationController(KubernetesResource):
    """Collected Kubernetes ReplicationController resource."""
    kind: ClassVar[str] = "kubernetes_replication_controller"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "replication_controller_status": S("status") >> Bend(KubernetesReplicationControllerStatus.mapping),
    }
    replication_controller_status: Optional[KubernetesReplicationControllerStatus] = field(default=None)
@define(eq=False, slots=False)
class KubernetesResourceQuotaStatus:
    """Deserialized `status` section of a ResourceQuota: enforced hard limits and current usage."""
    kind: ClassVar[str] = "kubernetes_resource_quota_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "hard": S("hard"),
        "used": S("used"),
    }
    # Kept as raw JSON (resource-name -> quantity maps), hence Any.
    hard: Optional[Any] = field(default=None)
    used: Optional[Any] = field(default=None)
@define
class KubernetesResourceQuotaSpec:
    """Deserialized `spec` section of a ResourceQuota."""
    kind: ClassVar[str] = "kubernetes_resource_quota_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "hard": S("hard"),
        "scope_selector": S("scopeSelector"),
        "scopes": S("scopes", default=[]),
    }
    # `hard` and `scope_selector` are kept as raw JSON, hence Any.
    hard: Optional[Any] = field(default=None)
    scope_selector: Optional[Any] = field(default=None)
    scopes: List[str] = field(factory=list)
@define(eq=False, slots=False)
class KubernetesResourceQuota(KubernetesResource, BaseQuota):
    """Collected Kubernetes ResourceQuota resource, modeled as a BaseQuota."""
    kind: ClassVar[str] = "kubernetes_resource_quota"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "resource_quota_status": S("status") >> Bend(KubernetesResourceQuotaStatus.mapping),
        "resource_quota_spec": S("spec") >> Bend(KubernetesResourceQuotaSpec.mapping),
    }
    resource_quota_status: Optional[KubernetesResourceQuotaStatus] = field(default=None)
    resource_quota_spec: Optional[KubernetesResourceQuotaSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesSecret(KubernetesResource):
    """Collected Kubernetes Secret resource (metadata only; no secret payload is mapped)."""
    kind: ClassVar[str] = "kubernetes_secret"
@define(eq=False, slots=False)
class KubernetesServiceAccount(KubernetesResource):
    """Collected Kubernetes ServiceAccount resource."""
    kind: ClassVar[str] = "kubernetes_service_account"
    reference_kinds: ClassVar[ModelReference] = {"successors": {"default": ["kubernetes_secret"], "delete": []}}

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect this service account to every secret referenced in its `secrets` list (by name)."""
        super().connect_in_graph(builder, source)
        for secret in bend(S("secrets", default=[]), source):
            if name := secret.get("name", None):
                builder.add_edge(self, EdgeType.default, clazz=KubernetesSecret, name=name)
@define(eq=False, slots=False)
class KubernetesMutatingWebhookConfiguration(KubernetesResource):
    """Collected Kubernetes MutatingWebhookConfiguration resource (metadata only)."""
    kind: ClassVar[str] = "kubernetes_mutating_webhook_configuration"
@define(eq=False, slots=False)
class KubernetesValidatingWebhookConfiguration(KubernetesResource):
    """Collected Kubernetes ValidatingWebhookConfiguration resource (metadata only)."""
    kind: ClassVar[str] = "kubernetes_validating_webhook_configuration"
@define(eq=False, slots=False)
class KubernetesControllerRevision(KubernetesResource):
    """Collected Kubernetes ControllerRevision resource."""
    kind: ClassVar[str] = "kubernetes_controller_revision"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": [],
            # Deleting a revision propagates to the workloads that own it.
            "delete": ["kubernetes_stateful_set", "kubernetes_daemon_set"],
        }
    }
@define(eq=False, slots=False)
class KubernetesDaemonSetStatusConditions:
    """One condition entry from a DaemonSet's `status.conditions` list."""
    kind: ClassVar[str] = "kubernetes_daemon_set_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesDaemonSetStatus:
    """Deserialized `status` section of a DaemonSet; conditions are sorted by transition time."""
    kind: ClassVar[str] = "kubernetes_daemon_set_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "collision_count": S("collisionCount"),
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesDaemonSetStatusConditions.mapping),
        "current_number_scheduled": S("currentNumberScheduled"),
        "desired_number_scheduled": S("desiredNumberScheduled"),
        "number_available": S("numberAvailable"),
        "number_misscheduled": S("numberMisscheduled"),
        "number_ready": S("numberReady"),
        "number_unavailable": S("numberUnavailable"),
        "observed_generation": S("observedGeneration"),
        "updated_number_scheduled": S("updatedNumberScheduled"),
    }
    collision_count: Optional[int] = field(default=None)
    conditions: List[KubernetesDaemonSetStatusConditions] = field(factory=list)
    current_number_scheduled: Optional[int] = field(default=None)
    desired_number_scheduled: Optional[int] = field(default=None)
    number_available: Optional[int] = field(default=None)
    number_misscheduled: Optional[int] = field(default=None)
    number_ready: Optional[int] = field(default=None)
    number_unavailable: Optional[int] = field(default=None)
    observed_generation: Optional[int] = field(default=None)
    updated_number_scheduled: Optional[int] = field(default=None)
@define
class KubernetesPodTemplateSpec:
    """Pod template embedded in workload specs; only the nested pod `spec` is mapped."""
    kind: ClassVar[str] = "kubernetes_pod_template_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "spec": S("spec") >> Bend(KubernetesPodSpec.mapping),
    }
    spec: Optional[KubernetesPodSpec] = field(default=None)
@define
class KubernetesDaemonSetSpec:
    """Deserialized `spec` section of a DaemonSet."""
    kind: ClassVar[str] = "kubernetes_daemon_set_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "min_ready_seconds": S("minReadySeconds"),
        "revision_history_limit": S("revisionHistoryLimit"),
        "selector": S("selector") >> Bend(KubernetesLabelSelector.mapping),
        "template": S("template") >> Bend(KubernetesPodTemplateSpec.mapping),
    }
    min_ready_seconds: Optional[int] = field(default=None)
    revision_history_limit: Optional[int] = field(default=None)
    selector: Optional[KubernetesLabelSelector] = field(default=None)
    template: Optional[KubernetesPodTemplateSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesDaemonSet(KubernetesResource):
    """Collected Kubernetes DaemonSet resource."""
    kind: ClassVar[str] = "kubernetes_daemon_set"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "daemon_set_status": S("status") >> Bend(KubernetesDaemonSetStatus.mapping),
        "daemon_set_spec": S("spec") >> Bend(KubernetesDaemonSetSpec.mapping),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["kubernetes_pod", "kubernetes_controller_revision"],
            "delete": [],
        }
    }
    daemon_set_status: Optional[KubernetesDaemonSetStatus] = field(default=None)
    daemon_set_spec: Optional[KubernetesDaemonSetSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesDeploymentStatusCondition:
    """One condition entry from a Deployment's `status.conditions` list."""
    kind: ClassVar[str] = "kubernetes_deployment_status_condition"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "last_update_time": S("lastUpdateTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    last_update_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesDeploymentStatus:
    """Deserialized `status` section of a Deployment; conditions are sorted by transition time."""
    kind: ClassVar[str] = "kubernetes_deployment_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "available_replicas": S("availableReplicas"),
        "collision_count": S("collisionCount"),
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesDeploymentStatusCondition.mapping),
        "observed_generation": S("observedGeneration"),
        "ready_replicas": S("readyReplicas"),
        "replicas": S("replicas"),
        "unavailable_replicas": S("unavailableReplicas"),
        "updated_replicas": S("updatedReplicas"),
    }
    available_replicas: Optional[int] = field(default=None)
    collision_count: Optional[int] = field(default=None)
    conditions: List[KubernetesDeploymentStatusCondition] = field(factory=list)
    observed_generation: Optional[int] = field(default=None)
    ready_replicas: Optional[int] = field(default=None)
    replicas: Optional[int] = field(default=None)
    unavailable_replicas: Optional[int] = field(default=None)
    updated_replicas: Optional[int] = field(default=None)
@define
class KubernetesRollingUpdateDeployment:
    """Rolling-update parameters of a deployment strategy."""
    kind: ClassVar[str] = "kubernetes_rolling_update_deployment"
    mapping: ClassVar[Dict[str, Bender]] = {
        "max_surge": S("maxSurge"),
        "max_unavailable": S("maxUnavailable"),
    }
    # Union[str, int]: the API allows either an absolute count or a percentage string.
    max_surge: Optional[Union[str, int]] = field(default=None)
    max_unavailable: Optional[Union[str, int]] = field(default=None)
@define
class KubernetesDeploymentStrategy:
    """Deployment update strategy (`spec.strategy`)."""
    kind: ClassVar[str] = "kubernetes_deployment_strategy"
    mapping: ClassVar[Dict[str, Bender]] = {
        "rolling_update": S("rollingUpdate") >> Bend(KubernetesRollingUpdateDeployment.mapping),
        "type": S("type"),
    }
    rolling_update: Optional[KubernetesRollingUpdateDeployment] = field(default=None)
    type: Optional[str] = field(default=None)
@define
class KubernetesDeploymentSpec:
    """Deserialized `spec` section of a Deployment."""
    kind: ClassVar[str] = "kubernetes_deployment_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "min_ready_seconds": S("minReadySeconds"),
        "paused": S("paused"),
        "progress_deadline_seconds": S("progressDeadlineSeconds"),
        "replicas": S("replicas"),
        "revision_history_limit": S("revisionHistoryLimit"),
        "selector": S("selector") >> Bend(KubernetesLabelSelector.mapping),
        "strategy": S("strategy") >> Bend(KubernetesDeploymentStrategy.mapping),
        "template": S("template") >> Bend(KubernetesPodTemplateSpec.mapping),
    }
    min_ready_seconds: Optional[int] = field(default=None)
    paused: Optional[bool] = field(default=None)
    progress_deadline_seconds: Optional[int] = field(default=None)
    replicas: Optional[int] = field(default=None)
    revision_history_limit: Optional[int] = field(default=None)
    selector: Optional[KubernetesLabelSelector] = field(default=None)
    strategy: Optional[KubernetesDeploymentStrategy] = field(default=None)
    template: Optional[KubernetesPodTemplateSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesDeployment(KubernetesResource):
    """Collected Kubernetes Deployment resource."""
    kind: ClassVar[str] = "kubernetes_deployment"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "deployment_status": S("status") >> Bend(KubernetesDeploymentStatus.mapping),
        "deployment_spec": S("spec") >> Bend(KubernetesDeploymentSpec.mapping),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["kubernetes_replica_set"],
            "delete": [],
        }
    }
    deployment_status: Optional[KubernetesDeploymentStatus] = field(default=None)
    deployment_spec: Optional[KubernetesDeploymentSpec] = field(default=None)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect this deployment to the replica sets matched by its label selector."""
        super().connect_in_graph(builder, source)
        # Only `matchLabels` is used here; matchExpressions selectors are not resolved.
        selector = bend(S("spec", "selector", "matchLabels"), source)
        if selector:
            builder.add_edges_from_selector(self, EdgeType.default, selector, KubernetesReplicaSet)
@define(eq=False, slots=False)
class KubernetesReplicaSetStatusCondition:
    """One condition entry from a ReplicaSet's `status.conditions` list."""
    # NOTE(review): kind string is plural ("..._conditions") while the class name is
    # singular — possibly intentional for model compatibility; confirm before changing.
    kind: ClassVar[str] = "kubernetes_replica_set_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesReplicaSetStatus:
    """Deserialized `status` section of a ReplicaSet; conditions are sorted by transition time."""
    kind: ClassVar[str] = "kubernetes_replica_set_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "available_replicas": S("availableReplicas"),
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesReplicaSetStatusCondition.mapping),
        "fully_labeled_replicas": S("fullyLabeledReplicas"),
        "observed_generation": S("observedGeneration"),
        "ready_replicas": S("readyReplicas"),
        "replicas": S("replicas"),
    }
    available_replicas: Optional[int] = field(default=None)
    conditions: List[KubernetesReplicaSetStatusCondition] = field(factory=list)
    fully_labeled_replicas: Optional[int] = field(default=None)
    observed_generation: Optional[int] = field(default=None)
    ready_replicas: Optional[int] = field(default=None)
    replicas: Optional[int] = field(default=None)
@define
class KubernetesReplicaSetSpec:
    """Deserialized `spec` section of a ReplicaSet."""
    kind: ClassVar[str] = "kubernetes_replica_set_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "min_ready_seconds": S("minReadySeconds"),
        "replicas": S("replicas"),
        "selector": S("selector") >> Bend(KubernetesLabelSelector.mapping),
        "template": S("template") >> Bend(KubernetesPodTemplateSpec.mapping),
    }
    min_ready_seconds: Optional[int] = field(default=None)
    replicas: Optional[int] = field(default=None)
    selector: Optional[KubernetesLabelSelector] = field(default=None)
    template: Optional[KubernetesPodTemplateSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesReplicaSet(KubernetesResource):
    """Collected Kubernetes ReplicaSet resource."""
    kind: ClassVar[str] = "kubernetes_replica_set"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "replica_set_status": S("status") >> Bend(KubernetesReplicaSetStatus.mapping),
        "replica_set_spec": S("spec") >> Bend(KubernetesReplicaSetSpec.mapping),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["kubernetes_pod"],
            # Deleting a replica set propagates to the owning deployment.
            "delete": ["kubernetes_deployment"],
        }
    }
    replica_set_status: Optional[KubernetesReplicaSetStatus] = field(default=None)
    replica_set_spec: Optional[KubernetesReplicaSetSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesStatefulSetStatusCondition:
    """One condition entry from a StatefulSet's `status.conditions` list."""
    kind: ClassVar[str] = "kubernetes_stateful_set_status_condition"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesStatefulSetStatus:
    """Deserialized `status` section of a StatefulSet; conditions are sorted by transition time."""
    kind: ClassVar[str] = "kubernetes_stateful_set_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "available_replicas": S("availableReplicas"),
        "collision_count": S("collisionCount"),
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesStatefulSetStatusCondition.mapping),
        "current_replicas": S("currentReplicas"),
        "current_revision": S("currentRevision"),
        "observed_generation": S("observedGeneration"),
        "ready_replicas": S("readyReplicas"),
        "replicas": S("replicas"),
        "update_revision": S("updateRevision"),
        "updated_replicas": S("updatedReplicas"),
    }
    available_replicas: Optional[int] = field(default=None)
    collision_count: Optional[int] = field(default=None)
    conditions: List[KubernetesStatefulSetStatusCondition] = field(factory=list)
    current_replicas: Optional[int] = field(default=None)
    current_revision: Optional[str] = field(default=None)
    observed_generation: Optional[int] = field(default=None)
    ready_replicas: Optional[int] = field(default=None)
    replicas: Optional[int] = field(default=None)
    update_revision: Optional[str] = field(default=None)
    updated_replicas: Optional[int] = field(default=None)
@define
class KubernetesStatefulSetSpec:
    """Deserialized `spec` section of a StatefulSet."""
    kind: ClassVar[str] = "kubernetes_stateful_set_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "min_ready_seconds": S("minReadySeconds"),
        "pod_management_policy": S("podManagementPolicy"),
        "replicas": S("replicas"),
        "revision_history_limit": S("revisionHistoryLimit"),
        "selector": S("selector") >> Bend(KubernetesLabelSelector.mapping),
        "service_name": S("serviceName"),
        "template": S("template") >> Bend(KubernetesPodTemplateSpec.mapping),
    }
    min_ready_seconds: Optional[int] = field(default=None)
    pod_management_policy: Optional[str] = field(default=None)
    replicas: Optional[int] = field(default=None)
    revision_history_limit: Optional[int] = field(default=None)
    selector: Optional[KubernetesLabelSelector] = field(default=None)
    service_name: Optional[str] = field(default=None)
    template: Optional[KubernetesPodTemplateSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesStatefulSet(KubernetesResource):
    """Collected Kubernetes StatefulSet resource."""
    kind: ClassVar[str] = "kubernetes_stateful_set"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "stateful_set_status": S("status") >> Bend(KubernetesStatefulSetStatus.mapping),
        "stateful_set_spec": S("spec") >> Bend(KubernetesStatefulSetSpec.mapping),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["kubernetes_pod", "kubernetes_controller_revision"],
            "delete": [],
        }
    }
    stateful_set_status: Optional[KubernetesStatefulSetStatus] = field(default=None)
    stateful_set_spec: Optional[KubernetesStatefulSetSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesHorizontalPodAutoscalerStatus:
    """Deserialized `status` section of a HorizontalPodAutoscaler (autoscaling/v1 fields)."""
    kind: ClassVar[str] = "kubernetes_horizontal_pod_autoscaler_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "current_cpu_utilization_percentage": S("currentCPUUtilizationPercentage"),
        "current_replicas": S("currentReplicas"),
        "desired_replicas": S("desiredReplicas"),
        "last_scale_time": S("lastScaleTime"),
        "observed_generation": S("observedGeneration"),
    }
    current_cpu_utilization_percentage: Optional[int] = field(default=None)
    current_replicas: Optional[int] = field(default=None)
    desired_replicas: Optional[int] = field(default=None)
    last_scale_time: Optional[datetime] = field(default=None)
    observed_generation: Optional[int] = field(default=None)
@define
class KubernetesCrossVersionObjectReference:
    """Reference to another Kubernetes object (apiVersion/kind/name), e.g. an HPA scale target."""
    # NOTE(review): kind string omits "version" vs. the class name — possibly a typo,
    # but it is part of the exported model name; confirm with consumers before changing.
    kind: ClassVar[str] = "kubernetes_cross_object_reference"
    mapping: ClassVar[Dict[str, Bender]] = {
        "api_version": S("apiVersion"),
        # The referenced object's "kind" is stored as resource_kind to avoid
        # clashing with this model's own `kind` class variable.
        "resource_kind": S("kind"),
        "name": S("name"),
    }
    api_version: Optional[str] = field(default=None)
    resource_kind: Optional[str] = field(default=None)
    name: Optional[str] = field(default=None)
@define
class KubernetesHorizontalPodAutoscalerSpec:
    """Deserialized `spec` section of a HorizontalPodAutoscaler (autoscaling/v1 fields)."""
    kind: ClassVar[str] = "kubernetes_horizontal_pod_autoscaler_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "max_replicas": S("maxReplicas"),
        "min_replicas": S("minReplicas"),
        "scale_target_ref": S("scaleTargetRef") >> Bend(KubernetesCrossVersionObjectReference.mapping),
        "target_cpu_utilization_percentage": S("targetCPUUtilizationPercentage"),
    }
    max_replicas: Optional[int] = field(default=None)
    min_replicas: Optional[int] = field(default=None)
    scale_target_ref: Optional[KubernetesCrossVersionObjectReference] = field(default=None)
    target_cpu_utilization_percentage: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class KubernetesHorizontalPodAutoscaler(KubernetesResource):
    """Collected Kubernetes HorizontalPodAutoscaler resource."""
    kind: ClassVar[str] = "kubernetes_horizontal_pod_autoscaler"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "horizontal_pod_autoscaler_status": S("status") >> Bend(KubernetesHorizontalPodAutoscalerStatus.mapping),
        "horizontal_pod_autoscaler_spec": S("spec") >> Bend(KubernetesHorizontalPodAutoscalerSpec.mapping),
    }
    horizontal_pod_autoscaler_status: Optional[KubernetesHorizontalPodAutoscalerStatus] = field(default=None)
    horizontal_pod_autoscaler_spec: Optional[KubernetesHorizontalPodAutoscalerSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesCronJobStatusActive:
    """Object reference for one currently running job in a CronJob's `status.active` list."""
    kind: ClassVar[str] = "kubernetes_cron_job_status_active"
    mapping: ClassVar[Dict[str, Bender]] = {
        "api_version": S("apiVersion"),
        "field_path": S("fieldPath"),
        "name": S("name"),
        "namespace": S("namespace"),
        "resource_version": S("resourceVersion"),
        "uid": S("uid"),
    }
    api_version: Optional[str] = field(default=None)
    field_path: Optional[str] = field(default=None)
    name: Optional[str] = field(default=None)
    namespace: Optional[str] = field(default=None)
    resource_version: Optional[str] = field(default=None)
    uid: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesCronJobStatus:
    """Deserialized `status` section of a CronJob."""
    kind: ClassVar[str] = "kubernetes_cron_job_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "active": S("active", default=[]) >> ForallBend(KubernetesCronJobStatusActive.mapping),
        "last_schedule_time": S("lastScheduleTime"),
        "last_successful_time": S("lastSuccessfulTime"),
    }
    active: List[KubernetesCronJobStatusActive] = field(factory=list)
    last_schedule_time: Optional[datetime] = field(default=None)
    last_successful_time: Optional[datetime] = field(default=None)
@define
class KubernetesJobSpec:
    """Deserialized `spec` section of a Job (also embedded in CronJob job templates)."""
    kind: ClassVar[str] = "kubernetes_job_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "active_deadline_seconds": S("activeDeadlineSeconds"),
        "backoff_limit": S("backoffLimit"),
        "completion_mode": S("completionMode"),
        "completions": S("completions"),
        "manual_selector": S("manualSelector"),
        "parallelism": S("parallelism"),
        "selector": S("selector") >> Bend(KubernetesLabelSelector.mapping),
        "suspend": S("suspend"),
        "template": S("template") >> Bend(KubernetesPodTemplateSpec.mapping),
        "ttl_seconds_after_finished": S("ttlSecondsAfterFinished"),
    }
    active_deadline_seconds: Optional[int] = field(default=None)
    backoff_limit: Optional[int] = field(default=None)
    completion_mode: Optional[str] = field(default=None)
    completions: Optional[int] = field(default=None)
    manual_selector: Optional[bool] = field(default=None)
    parallelism: Optional[int] = field(default=None)
    selector: Optional[KubernetesLabelSelector] = field(default=None)
    suspend: Optional[bool] = field(default=None)
    template: Optional[KubernetesPodTemplateSpec] = field(default=None)
    ttl_seconds_after_finished: Optional[int] = field(default=None)
@define
class KubernetesJobTemplateSpec:
    """Job template embedded in a CronJob spec; only the nested job `spec` is mapped."""
    kind: ClassVar[str] = "kubernetes_job_template_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "spec": S("spec") >> Bend(KubernetesJobSpec.mapping),
    }
    spec: Optional[KubernetesJobSpec] = field(default=None)
@define
class KubernetesCronJobSpec:
    """Deserialized `spec` section of a CronJob."""
    kind: ClassVar[str] = "kubernetes_cron_job_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "concurrency_policy": S("concurrencyPolicy"),
        "failed_jobs_history_limit": S("failedJobsHistoryLimit"),
        "job_template": S("jobTemplate") >> Bend(KubernetesJobTemplateSpec.mapping),
        "schedule": S("schedule"),
        "starting_deadline_seconds": S("startingDeadlineSeconds"),
        "successful_jobs_history_limit": S("successfulJobsHistoryLimit"),
        "suspend": S("suspend"),
        "time_zone": S("timeZone"),
    }
    concurrency_policy: Optional[str] = field(default=None)
    failed_jobs_history_limit: Optional[int] = field(default=None)
    job_template: Optional[KubernetesJobTemplateSpec] = field(default=None)
    schedule: Optional[str] = field(default=None)
    starting_deadline_seconds: Optional[int] = field(default=None)
    successful_jobs_history_limit: Optional[int] = field(default=None)
    suspend: Optional[bool] = field(default=None)
    time_zone: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesCronJob(KubernetesResource):
    """Collected Kubernetes CronJob resource."""
    kind: ClassVar[str] = "kubernetes_cron_job"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "cron_job_status": S("status") >> Bend(KubernetesCronJobStatus.mapping),
        "cron_job_spec": S("spec") >> Bend(KubernetesCronJobSpec.mapping),
    }
    reference_kinds: ClassVar[ModelReference] = {"successors": {"default": ["kubernetes_job"], "delete": []}}
    cron_job_status: Optional[KubernetesCronJobStatus] = field(default=None)
    cron_job_spec: Optional[KubernetesCronJobSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesJobStatusConditions:
    """One condition entry from a Job's `status.conditions` list."""
    kind: ClassVar[str] = "kubernetes_job_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_probe_time": S("lastProbeTime"),
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_probe_time: Optional[datetime] = field(default=None)
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesJobStatus:
    """Deserialized `status` section of a Job; conditions are sorted by transition time."""
    kind: ClassVar[str] = "kubernetes_job_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "active": S("active"),
        "completed_indexes": S("completedIndexes"),
        "completion_time": S("completionTime"),
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesJobStatusConditions.mapping),
        "failed": S("failed"),
        "ready": S("ready"),
        "start_time": S("startTime"),
        "succeeded": S("succeeded"),
    }
    active: Optional[int] = field(default=None)
    completed_indexes: Optional[str] = field(default=None)
    completion_time: Optional[datetime] = field(default=None)
    conditions: List[KubernetesJobStatusConditions] = field(factory=list)
    failed: Optional[int] = field(default=None)
    ready: Optional[int] = field(default=None)
    start_time: Optional[datetime] = field(default=None)
    succeeded: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class KubernetesJob(KubernetesResource):
    """Collected Kubernetes Job resource."""
    kind: ClassVar[str] = "kubernetes_job"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "job_status": S("status") >> Bend(KubernetesJobStatus.mapping),
        "job_spec": S("spec") >> Bend(KubernetesJobSpec.mapping),
    }
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {"default": ["kubernetes_pod"], "delete": ["kubernetes_cron_job"]}
    }
    job_status: Optional[KubernetesJobStatus] = field(default=None)
    job_spec: Optional[KubernetesJobSpec] = field(default=None)
@define(eq=False, slots=False)
class KubernetesFlowSchemaStatusConditions:
    """One condition entry from a FlowSchema's `status.conditions` list."""
    kind: ClassVar[str] = "kubernetes_flow_schema_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesFlowSchemaStatus:
    """Deserialized `status` section of a FlowSchema; only conditions are mapped."""
    kind: ClassVar[str] = "kubernetes_flow_schema_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesFlowSchemaStatusConditions.mapping),
    }
    conditions: List[KubernetesFlowSchemaStatusConditions] = field(factory=list)
@define(eq=False, slots=False)
class KubernetesFlowSchema(KubernetesResource):
    """Collected Kubernetes FlowSchema resource."""
    kind: ClassVar[str] = "kubernetes_flow_schema"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "flow_schema_status": S("status") >> Bend(KubernetesFlowSchemaStatus.mapping),
    }
    flow_schema_status: Optional[KubernetesFlowSchemaStatus] = field(default=None)
@define(eq=False, slots=False)
class KubernetesPriorityLevelConfigurationStatusConditions:
    """One condition entry from a PriorityLevelConfiguration's `status.conditions` list."""
    kind: ClassVar[str] = "kubernetes_priority_level_configuration_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesPriorityLevelConfigurationStatus:
    """Deserialized `status` section of a PriorityLevelConfiguration; only conditions are mapped."""
    kind: ClassVar[str] = "kubernetes_priority_level_configuration_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesPriorityLevelConfigurationStatusConditions.mapping),
    }
    conditions: List[KubernetesPriorityLevelConfigurationStatusConditions] = field(factory=list)
@define(eq=False, slots=False)
class KubernetesPriorityLevelConfiguration(KubernetesResource):
    """Collected Kubernetes PriorityLevelConfiguration resource."""
    kind: ClassVar[str] = "kubernetes_priority_level_configuration"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "priority_level_configuration_status": S("status") >> Bend(KubernetesPriorityLevelConfigurationStatus.mapping),
    }
    priority_level_configuration_status: Optional[KubernetesPriorityLevelConfigurationStatus] = field(default=None)
@define(eq=False, slots=False)
class KubernetesIngressStatusLoadbalancerIngressPorts:
    """One port entry of a load balancer ingress point in an Ingress status."""
    kind: ClassVar[str] = "kubernetes_ingress_status_loadbalancer_ingress_ports"
    mapping: ClassVar[Dict[str, Bender]] = {
        "error": S("error"),
        "port": S("port"),
        "protocol": S("protocol"),
    }
    error: Optional[str] = field(default=None)
    port: Optional[int] = field(default=None)
    protocol: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class KubernetesIngressStatusLoadbalancerIngress:
    """One ingress point (hostname or IP plus ports) of the load balancer in an Ingress status."""
    kind: ClassVar[str] = "kubernetes_ingress_status_loadbalancer_ingress"
    mapping: ClassVar[Dict[str, Bender]] = {
        "hostname": S("hostname"),
        "ip": S("ip"),
        "ports": S("ports", default=[]) >> ForallBend(KubernetesIngressStatusLoadbalancerIngressPorts.mapping),
    }
    hostname: Optional[str] = field(default=None)
    ip: Optional[str] = field(default=None)
    ports: List[KubernetesIngressStatusLoadbalancerIngressPorts] = field(factory=list)
@define(eq=False, slots=False)
class KubernetesIngressStatusLoadbalancer:
    """Load balancer section (`status.loadBalancer`) of an Ingress status."""
    kind: ClassVar[str] = "kubernetes_ingress_status_loadbalancer"
    mapping: ClassVar[Dict[str, Bender]] = {
        "ingress": S("ingress", default=[]) >> ForallBend(KubernetesIngressStatusLoadbalancerIngress.mapping),
    }
    ingress: List[KubernetesIngressStatusLoadbalancerIngress] = field(factory=list)
@define(eq=False, slots=False)
class KubernetesIngressStatus:
    """Deserialized `status` section of an Ingress."""
    kind: ClassVar[str] = "kubernetes_ingress_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "load_balancer": S("loadBalancer") >> Bend(KubernetesIngressStatusLoadbalancer.mapping),
    }
    load_balancer: Optional[KubernetesIngressStatusLoadbalancer] = field(default=None)
@define
class KubernetesIngressRule:
    """One entry from an Ingress `spec.rules` list; the `http` section is kept as raw JSON."""
    kind: ClassVar[str] = "kubernetes_ingress_rule"
    mapping: ClassVar[Dict[str, Bender]] = {
        "host": S("host"),
        "http": S("http"),
    }
    host: Optional[str] = field(default=None)
    http: Optional[Any] = field(default=None)
@define
class KubernetesIngressTLS:
    """One TLS entry from an Ingress `spec.tls` list."""
    kind: ClassVar[str] = "kubernetes_ingress_tls"
    mapping: ClassVar[Dict[str, Bender]] = {
        "hosts": S("hosts", default=[]),
        "secret_name": S("secretName"),
    }
    hosts: List[str] = field(factory=list)
    secret_name: Optional[str] = field(default=None)
@define
class KubernetesIngressSpec:
    """Deserialized `spec` section of an Ingress."""
    kind: ClassVar[str] = "kubernetes_ingress_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "ingress_class_name": S("ingressClassName"),
        "rules": S("rules", default=[]) >> ForallBend(KubernetesIngressRule.mapping),
        "tls": S("tls", default=[]) >> ForallBend(KubernetesIngressTLS.mapping),
    }
    ingress_class_name: Optional[str] = field(default=None)
    rules: List[KubernetesIngressRule] = field(factory=list)
    tls: List[KubernetesIngressTLS] = field(factory=list)
def get_backend_service_names(json: Json) -> List[str]:
    """Return the names of all backend services referenced by an ingress definition.

    Collects every `backend.service.name` from all HTTP paths of all rules and
    appends the name of the default backend service, if one is configured.
    Entries without a service name are skipped.
    """
    # Per-rule lists of service names from spec.rules[].http.paths[].backend.service.name.
    nested: List[List[Optional[str]]] = bend(
        S("spec", "rules", default=[])
        >> ForallBend(S("http", "paths", default=[]) >> ForallBend(S("backend", "service", "name"))),
        json,
    )
    # Flatten and drop missing/empty names.
    names: List[str] = [name for rule_names in nested for name in rule_names if name]
    # The optional default backend (spec.defaultBackend.service.name) is added last.
    default_name: Optional[str] = bend(S("spec", "defaultBackend", "service", "name"), json)
    if default_name:
        names.append(default_name)
    return names
@define(eq=False, slots=False)
class KubernetesIngress(KubernetesResource, BaseLoadBalancer):
    """Collected Kubernetes Ingress resource, modeled as a load balancer."""

    kind: ClassVar[str] = "kubernetes_ingress"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "ingress_status": S("status") >> Bend(KubernetesIngressStatus.mapping),
        "public_ip_address": S("status", "loadBalancer", "ingress", default=[])[0]["ip"],
        # take the public ip of the first load balancer
        "ingress_spec": S("spec") >> Bend(KubernetesIngressSpec.mapping),
        # temporary values, they will be replaced in connect_in_graph call with pod ids
        "backends": F(get_backend_service_names),
    }
    ingress_status: Optional[KubernetesIngressStatus] = field(default=None)
    ingress_spec: Optional[KubernetesIngressSpec] = field(default=None)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect this ingress to its backing services and resolve `backends` to pod names/ids."""
        super().connect_in_graph(builder, source)
        # Index every pod in the graph under each of its (label key, label value) pairs.
        pods = [
            ((key, val), pod)
            for pod in builder.graph.nodes
            if isinstance(pod, KubernetesPod)
            for key, val in pod.labels.items()
        ]
        pods_by_labels: Dict[Tuple[str, str], List[KubernetesPod]] = defaultdict(list)
        for (key, val), pod in pods:
            pods_by_labels[(key, val)].append(pod)
        resolved_backends: Set[str] = set()
        # self.backends still holds service names here (see mapping above).
        for backend in self.backends:
            for service in builder.graph.searchall({"kind": KubernetesService.kind, "name": backend}):
                if not isinstance(service, KubernetesService):
                    continue
                builder.add_edge(self, edge_type=EdgeType.default, node=service)
                selector = service.service_spec.selector if service.service_spec else {}
                if not selector:
                    continue
                # NOTE(review): pods matching ANY single selector pair are collected;
                # strict Kubernetes selector semantics require ALL pairs to match — confirm intended.
                for key, value in selector.items():
                    for pod in pods_by_labels.get((key, value), []):
                        resolved_backends.add(pod.name or pod.id)
        self.backends = list(resolved_backends)
@define(eq=False, slots=False)
class KubernetesIngressClass(KubernetesResource):
    """An ingress class; carries no properties beyond the common resource base."""

    kind: ClassVar[str] = "kubernetes_ingress_class"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {}
@define(eq=False, slots=False)
class KubernetesNetworkPolicyStatusConditions:
    """One condition entry from a network policy's status."""

    kind: ClassVar[str] = "kubernetes_network_policy_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "observed_generation": S("observedGeneration"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    observed_generation: Optional[int] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    # `type` intentionally mirrors the K8s API field name, shadowing the builtin.
    type: Optional[str] = field(default=None)


@define(eq=False, slots=False)
class KubernetesNetworkPolicyStatus:
    """Status of a network policy: its conditions, ordered by transition time."""

    kind: ClassVar[str] = "kubernetes_network_policy_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesNetworkPolicyStatusConditions.mapping),
    }
    conditions: List[KubernetesNetworkPolicyStatusConditions] = field(factory=list)


@define(eq=False, slots=False)
class KubernetesNetworkPolicy(KubernetesResource):
    """A Kubernetes network policy resource."""

    kind: ClassVar[str] = "kubernetes_network_policy"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "network_policy_status": S("status") >> Bend(KubernetesNetworkPolicyStatus.mapping),
    }
    network_policy_status: Optional[KubernetesNetworkPolicyStatus] = field(default=None)
@define(eq=False, slots=False)
class KubernetesRuntimeClass(KubernetesResource):
    """A runtime class; carries no properties beyond the common resource base."""

    kind: ClassVar[str] = "kubernetes_runtime_class"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {}
@define(eq=False, slots=False)
class KubernetesPodDisruptionBudgetStatusConditions:
    """One condition entry from a pod disruption budget's status."""

    kind: ClassVar[str] = "kubernetes_pod_disruption_budget_status_conditions"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_transition_time": S("lastTransitionTime"),
        "message": S("message"),
        "observed_generation": S("observedGeneration"),
        "reason": S("reason"),
        "status": S("status"),
        "type": S("type"),
    }
    last_transition_time: Optional[datetime] = field(default=None)
    message: Optional[str] = field(default=None)
    observed_generation: Optional[int] = field(default=None)
    reason: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    # `type` intentionally mirrors the K8s API field name, shadowing the builtin.
    type: Optional[str] = field(default=None)


@define(eq=False, slots=False)
class KubernetesPodDisruptionBudgetStatus:
    """Status of a pod disruption budget: healthy/disruption counters and conditions."""

    kind: ClassVar[str] = "kubernetes_pod_disruption_budget_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "conditions": S("conditions", default=[])
        >> SortTransitionTime
        >> ForallBend(KubernetesPodDisruptionBudgetStatusConditions.mapping),
        "current_healthy": S("currentHealthy"),
        "desired_healthy": S("desiredHealthy"),
        "disrupted_pods": S("disruptedPods"),
        "disruptions_allowed": S("disruptionsAllowed"),
        "expected_pods": S("expectedPods"),
        "observed_generation": S("observedGeneration"),
    }
    conditions: List[KubernetesPodDisruptionBudgetStatusConditions] = field(factory=list)
    current_healthy: Optional[int] = field(default=None)
    desired_healthy: Optional[int] = field(default=None)
    # Raw JSON mapping; presumably pod name -> eviction time (TODO confirm).
    disrupted_pods: Optional[Any] = field(default=None)
    disruptions_allowed: Optional[int] = field(default=None)
    expected_pods: Optional[int] = field(default=None)
    observed_generation: Optional[int] = field(default=None)


@define
class KubernetesPodDisruptionBudgetSpec:
    """Spec of a pod disruption budget.

    `max_unavailable`/`min_available` may be an absolute count (int) or a
    percentage string, matching the K8s intstr convention.
    """

    kind: ClassVar[str] = "kubernetes_pod_disruption_budget_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "max_unavailable": S("maxUnavailable"),
        "min_available": S("minAvailable"),
        "selector": S("selector") >> Bend(KubernetesLabelSelector.mapping),
    }
    max_unavailable: Optional[Union[str, int]] = field(default=None)
    min_available: Optional[Union[str, int]] = field(default=None)
    selector: Optional[KubernetesLabelSelector] = field(default=None)


@define(eq=False, slots=False)
class KubernetesPodDisruptionBudget(KubernetesResource):
    """A Kubernetes pod disruption budget resource."""

    kind: ClassVar[str] = "kubernetes_pod_disruption_budget"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "pod_disruption_budget_status": S("status") >> Bend(KubernetesPodDisruptionBudgetStatus.mapping),
        "pod_disruption_budget_spec": S("spec") >> Bend(KubernetesPodDisruptionBudgetSpec.mapping),
    }
    pod_disruption_budget_status: Optional[KubernetesPodDisruptionBudgetStatus] = field(default=None)
    pod_disruption_budget_spec: Optional[KubernetesPodDisruptionBudgetSpec] = field(default=None)
# The following resources carry no extra properties beyond the common
# KubernetesResource base; each exists to give its K8s kind a distinct resoto kind.
@define(eq=False, slots=False)
class KubernetesClusterRole(KubernetesResource):
    kind: ClassVar[str] = "kubernetes_cluster_role"


@define(eq=False, slots=False)
class KubernetesClusterRoleBinding(KubernetesResource):
    kind: ClassVar[str] = "kubernetes_cluster_role_binding"


@define(eq=False, slots=False)
class KubernetesRole(KubernetesResource):
    kind: ClassVar[str] = "kubernetes_role"


@define(eq=False, slots=False)
class KubernetesRoleBinding(KubernetesResource):
    kind: ClassVar[str] = "kubernetes_role_binding"


@define(eq=False, slots=False)
class KubernetesPriorityClass(KubernetesResource):
    kind: ClassVar[str] = "kubernetes_priority_class"


@define(eq=False, slots=False)
class KubernetesCSIDriver(KubernetesResource):
    kind: ClassVar[str] = "kubernetes_csi_driver"


@define(eq=False, slots=False)
class KubernetesCSINode(KubernetesResource):
    kind: ClassVar[str] = "kubernetes_csi_node"


@define(eq=False, slots=False)
class KubernetesCSIStorageCapacity(KubernetesResource):
    kind: ClassVar[str] = "kubernetes_csi_storage_capacity"


@define(eq=False, slots=False)
class KubernetesStorageClass(KubernetesResource):
    kind: ClassVar[str] = "kubernetes_storage_class"
@define(eq=False, slots=False)
class KubernetesVolumeError:
    """An attach/detach error reported in a volume attachment status."""

    kind: ClassVar[str] = "kubernetes_volume_error"
    mapping: ClassVar[Dict[str, Bender]] = {
        "message": S("message"),
        "time": S("time"),
    }
    message: Optional[str] = field(default=None)
    time: Optional[datetime] = field(default=None)


@define(eq=False, slots=False)
class KubernetesVolumeAttachmentStatus:
    """Status of a volume attachment: attach state plus any attach/detach errors."""

    kind: ClassVar[str] = "kubernetes_volume_attachment_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "attach_error": S("attachError") >> Bend(KubernetesVolumeError.mapping),
        "attached": S("attached"),
        "attachment_metadata": S("attachmentMetadata"),
        "detach_error": S("detachError") >> Bend(KubernetesVolumeError.mapping),
    }
    attach_error: Optional[KubernetesVolumeError] = field(default=None)
    attached: Optional[bool] = field(default=None)
    # Raw JSON payload supplied by the attach operation; not modeled further.
    attachment_metadata: Optional[Any] = field(default=None)
    detach_error: Optional[KubernetesVolumeError] = field(default=None)


@define
class KubernetesVolumeAttachmentSpec:
    """Spec of a volume attachment: which attacher binds which source to which node."""

    kind: ClassVar[str] = "kubernetes_volume_attachment_spec"
    mapping: ClassVar[Dict[str, Bender]] = {
        "attacher": S("attacher"),
        "node_name": S("nodeName"),
        "source": S("source"),
    }
    attacher: Optional[str] = field(default=None)
    node_name: Optional[str] = field(default=None)
    # Raw JSON volume source; not modeled further.
    source: Optional[Any] = field(default=None)


@define(eq=False, slots=False)
class KubernetesVolumeAttachment(KubernetesResource):
    """A Kubernetes volume attachment resource."""

    kind: ClassVar[str] = "kubernetes_volume_attachment"
    mapping: ClassVar[Dict[str, Bender]] = KubernetesResource.mapping | {
        "volume_attachment_status": S("status") >> Bend(KubernetesVolumeAttachmentStatus.mapping),
        "volume_attachment_spec": S("spec") >> Bend(KubernetesVolumeAttachmentSpec.mapping),
    }
    volume_attachment_status: Optional[KubernetesVolumeAttachmentStatus] = field(default=None)
    volume_attachment_spec: Optional[KubernetesVolumeAttachmentSpec] = field(default=None)
# Registry of all collected resource classes, grouped by Kubernetes API
# category. Commented-out entries are known kinds that are deliberately
# not collected.
workload_resources: List[Type[KubernetesResource]] = [
    KubernetesControllerRevision,
    KubernetesCronJob,
    KubernetesDaemonSet,
    KubernetesDeployment,
    KubernetesHorizontalPodAutoscaler,
    KubernetesJob,
    KubernetesPod,
    KubernetesPodTemplate,
    KubernetesPriorityClass,
    KubernetesReplicaSet,
    KubernetesReplicationController,
    KubernetesStatefulSet,
]
service_resources: List[Type[KubernetesResource]] = [
    KubernetesEndpointSlice,
    KubernetesEndpoints,
    KubernetesIngress,
    KubernetesIngressClass,
    KubernetesService,
]
config_storage_resources: List[Type[KubernetesResource]] = [
    KubernetesCSIDriver,
    KubernetesCSINode,
    KubernetesCSIStorageCapacity,
    KubernetesConfigMap,
    KubernetesPersistentVolume,
    KubernetesPersistentVolumeClaim,
    KubernetesSecret,
    KubernetesStorageClass,
    # KubernetesVolume,
    KubernetesVolumeAttachment,
]
authentication_resources: List[Type[KubernetesResource]] = [
    # KubernetesCertificateSigningRequest,
    # KubernetesTokenRequest,
    # KubernetesTokenReview,
    KubernetesServiceAccount,
]
authorization_resources: List[Type[KubernetesResource]] = [
    # KubernetesLocalSubjectAccessReview,
    # KubernetesSelfSubjectAccessReview,
    # KubernetesSelfSubjectRulesReview,
    # KubernetesSubjectAccessReview,
    KubernetesClusterRole,
    KubernetesClusterRoleBinding,
    KubernetesRole,
    KubernetesRoleBinding,
]
policy_resources: List[Type[KubernetesResource]] = [
    # KubernetesPodSecurityPolicy
    KubernetesLimitRange,
    KubernetesNetworkPolicy,
    KubernetesPodDisruptionBudget,
    KubernetesResourceQuota,
]
extend_resources: List[Type[KubernetesResource]] = [
    # KubernetesCustomResourceDefinition,
    KubernetesMutatingWebhookConfiguration,
    KubernetesValidatingWebhookConfiguration,
]
cluster_resources: List[Type[KubernetesResource]] = [
    # KubernetesApiService,
    # KubernetesBinding
    # KubernetesLease,
    # KubernetesComponentStatus,
    # KubernetesEvent, # ignore events
    KubernetesFlowSchema,
    KubernetesNamespace,
    KubernetesNode,
    KubernetesPriorityLevelConfiguration,
    KubernetesRuntimeClass,
]
# Flattened list of every collected kind, plus lookup tables keyed by the
# Kubernetes API name and the resoto kind name respectively.
all_k8s_resources: List[Type[KubernetesResource]] = (
    workload_resources
    + service_resources
    + config_storage_resources
    + authentication_resources
    + authorization_resources
    + policy_resources
    + extend_resources
    + cluster_resources
)
all_k8s_resources_by_k8s_name: Dict[str, Type[KubernetesResource]] = {a.k8s_name(): a for a in all_k8s_resources}
all_k8s_resources_by_resoto_name: Dict[str, Type[KubernetesResource]] = {a.kind: a for a in all_k8s_resources}
from typing import Optional, List
import requests
from .resources import PosthogProject, PosthogEvent
class PosthogAPI:
    """Thin client for the Posthog REST API.

    All requests are authenticated with a bearer token and addressed relative
    to the instance's `/api/projects` endpoint.
    """

    def __init__(self, api_key: str, url: str) -> None:
        self.api_key = api_key
        self.projects_api = f"{url}/api/projects"

    def project(self, pro: str) -> Optional[PosthogProject]:
        """Return the PosthogProject with name `pro`, or None if no project matches.

        Walks the paginated project listing until a name match is found.
        """
        next_page: Optional[str] = self.projects_api
        while next_page is not None:
            r = self._get(next_page)
            for p in r.get("results", []):
                if p.get("name") == pro:
                    # Fetch the full project record; the listing is abridged.
                    data = self._get(f"{self.projects_api}/{p.get('id')}")
                    return PosthogProject.new(data)
            next_page = r.get("next")
        # Fix: previously the function fell off the end and returned None
        # implicitly while being annotated as returning PosthogProject.
        return None

    def events(self, project_id: int) -> List[PosthogEvent]:
        """Return all event definitions for a specific posthog project.

        Each event's `count` is populated from the trends insight of the
        last hour.
        """
        next_page: Optional[str] = f"{self.projects_api}/{project_id}/event_definitions"
        events: List[PosthogEvent] = []
        while next_page is not None:
            r = self._get(next_page)
            for event in r.get("results", []):
                # Copy before annotating so the response payload is not mutated.
                data = dict(event)
                data["project_id"] = project_id
                events.append(PosthogEvent.new(data))
            next_page = r.get("next")
        for event in events:
            metrics = self.insights(event, "-1h")
            # Fix: guard against an empty/missing result list, which previously
            # raised TypeError/IndexError.
            results = metrics.get("result") or []
            if results:
                event.count = int(results[0].get("count", 0))
        return events

    def insights(self, event: PosthogEvent, since: str) -> dict:
        """Return the raw TRENDS insight for `event` from `since` until now."""
        uri = f"{self.projects_api}/{event.project_id}/insights/trend/"
        params = {
            "insight": "TRENDS",
            "events": [{"id": event.name, "name": event.name, "order": 0}],
            "date_from": since,
        }
        r = self._get(uri, headers={"Content-Type": "application/json"}, params=params)
        return r

    def _get(self, uri: str, headers: Optional[dict] = None, params: Optional[dict] = None) -> dict:
        """GET `uri` with bearer authentication and return the decoded JSON body.

        Raises RuntimeError on any non-200 response.
        """
        # Fix: the previous signature used a mutable default (`headers={}`) and
        # mutated it in place, leaking the auth header into the shared default
        # and into caller-supplied dicts.
        request_headers = dict(headers or {})
        request_headers["Authorization"] = f"Bearer {self.api_key}"
        # NOTE(review): params are sent as a JSON body even for GET — the
        # Posthog insights endpoint accepts this; confirm before switching to
        # query parameters.
        r = requests.get(uri, headers=request_headers, json=params)
        if r.status_code != 200:
            raise RuntimeError(f"Error requesting insights: {uri} {r.text} ({r.status_code})")
        return r.json()
from datetime import datetime
from attrs import define
from typing import Optional, ClassVar, List, Dict
from resotolib.graph import Graph
from resotolib.baseresources import BaseAccount, BaseResource
@define(eq=False, slots=False)
class PosthogResource:
    """Base for all Posthog resources.

    Posthog resources are read-only from resoto's perspective: deletion and
    tag mutation always report failure.
    """

    kind: ClassVar[str] = "posthog_resource"

    def delete(self, graph: Graph) -> bool:
        return False

    def update_tag(self, key, value) -> bool:
        return False

    def delete_tag(self, key) -> bool:
        return False
@define(eq=False, slots=False)
class PosthogProject(PosthogResource, BaseAccount):
    """A Posthog project, mapped onto resoto's account abstraction."""

    kind: ClassVar[str] = "posthog_project"
    project_id: int
    # Fix: these defaults were previously written as `(None,)` — the stray
    # trailing comma made every default a one-element tuple instead of None
    # (compare `recording_domains`, which was already plain None).
    app_urls: Optional[List[str]] = None
    slack_incoming_webhook: Optional[List[str]] = None
    anonymize_ips: Optional[bool] = None
    completed_snippet_onboarding: Optional[bool] = None
    timezone: Optional[str] = None
    test_account_filters: Optional[object] = None
    test_account_filters_default_checked: Optional[bool] = None
    path_cleaning_filters: Optional[object] = None
    data_attributes: Optional[object] = None
    person_display_name_properties: Optional[List[str]] = None
    correlation_config: Optional[Dict] = None
    session_recording_opt_in: Optional[bool] = None
    access_control: Optional[bool] = None
    primary_dashboard: Optional[int] = None
    live_events_columns: Optional[List[str]] = None
    recording_domains: Optional[List[str]] = None

    @staticmethod
    def new(data: Dict) -> "PosthogProject":
        """Build a PosthogProject from a raw API response dict."""
        return PosthogProject(
            id=data.get("uuid"),
            project_id=data.get("id"),
            name=data.get("name"),
            mtime=convert_date(data.get("updated_at")),
            ctime=convert_date(data.get("created_at")),
            app_urls=data.get("app_urls"),
            slack_incoming_webhook=data.get("slack_incoming_webhook"),
            anonymize_ips=data.get("anonymize_ips"),
            completed_snippet_onboarding=data.get("completed_snippet_onboarding"),
            timezone=data.get("timezone"),
            test_account_filters=data.get("test_account_filters"),
            test_account_filters_default_checked=data.get("test_account_filters_default_checked"),
            path_cleaning_filters=data.get("path_cleaning_filters"),
            data_attributes=data.get("data_attributes"),
            person_display_name_properties=data.get("person_display_name_properties"),
            correlation_config=data.get("correlation_config"),
            session_recording_opt_in=data.get("session_recording_opt_in"),
            access_control=data.get("access_control"),
            primary_dashboard=data.get("primary_dashboard"),
            live_events_columns=data.get("live_events_columns"),
            recording_domains=data.get("recording_domains"),
        )
@define(eq=False, slots=False)
class PosthogEvent(PosthogResource, BaseResource):
    """An event definition within a Posthog project."""

    kind: ClassVar[str] = "posthog_event"
    project_id: int
    # Populated after construction by PosthogAPI.events() from insights data.
    count: int = 0
    description: Optional[str] = None
    posthog_tags: Optional[List[str]] = None
    volume_30_day: Optional[int] = None
    query_usage_30_day: Optional[int] = None
    is_action: Optional[bool] = None
    action_id: Optional[int] = None
    last_seen_at: Optional[str] = None
    verified: Optional[bool] = None
    verified_at: Optional[str] = None
    is_calculating: Optional[bool] = None
    last_calculated_at: Optional[str] = None
    post_to_slack: Optional[bool] = None

    @staticmethod
    def new(data: Dict) -> BaseResource:
        """Build a PosthogEvent from a raw API response dict.

        NOTE(review): several declared fields (posthog_tags, last_seen_at,
        verified, verified_at) are not populated here — confirm whether that
        is intentional.
        """
        return PosthogEvent(
            id=data.get("id"),
            name=data.get("name"),
            mtime=convert_date(data.get("last_updated_at")),
            ctime=convert_date(data.get("created_at")),
            project_id=data.get("project_id"),
            description=data.get("description"),
            volume_30_day=data.get("volume_30_day"),
            query_usage_30_day=data.get("query_usage_30_day"),
            is_action=data.get("is_action"),
            action_id=data.get("action_id"),
            is_calculating=data.get("is_calculating"),
            last_calculated_at=data.get("last_calculated_at"),
            post_to_slack=data.get("post_to_slack"),
        )
def convert_date(date_str: Optional[str]) -> Optional[datetime]:
    """Parse a Posthog ISO-8601 UTC timestamp like `2023-05-17T12:34:56.789Z`.

    Returns None when `date_str` is None or does not match the expected format.
    """
    try:
        return datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S.%fZ")
    except (TypeError, ValueError):
        # Fix: narrowed from a bare `except Exception`, which could hide
        # unrelated programming errors. TypeError covers None input,
        # ValueError covers a format mismatch.
        return None
from copy import deepcopy
from typing import ClassVar, Dict, List

from attrs import define, field
# Placeholder configuration shipped as the default: shows the expected nesting
# cloud -> account -> region -> kind -> [resource ids].
default_config = {
    "example": {
        "Example Account": {
            "us-west": {"example_instance": ["someInstance1"]},
        },
    },
}
@define
class ProtectorConfig:
    """Configuration schema for the protector plugin.

    `config` maps cloud id -> account id -> region id -> resource kind -> list
    of protected resource ids.
    """

    kind: ClassVar[str] = "plugin_protector"
    enabled: bool = field(
        default=False,
        metadata={"description": "Enable plugin?", "restart_required": True},
    )
    # Fix: the factory previously returned the module-level `default_config`
    # dict itself, so mutating one config instance silently altered the shared
    # default (and every later instance). Hand out a deep copy instead.
    config: Dict[str, Dict[str, Dict[str, Dict[str, List[str]]]]] = field(
        factory=lambda: deepcopy(default_config),
        metadata={
            "description": (
                "Configuration for the plugin\n"
                "Format:\n"
                " cloud.id:\n"
                " account.id:\n"
                " region.id:\n"
                " kind:\n"
                " - resource.id"
            )
        },
    )

    @staticmethod
    def validate(cfg: "ProtectorConfig") -> bool:
        """Validate the nested structure of cfg.config.

        Returns True when valid; raises ValueError naming the first offending
        element otherwise.
        """
        config = cfg.config
        if not isinstance(config, dict):
            raise ValueError("Config is no dict")
        for cloud_id, account_data in config.items():
            if not isinstance(cloud_id, str):
                raise ValueError(f"Cloud ID {cloud_id} is no string")
            if not isinstance(account_data, dict):
                raise ValueError(f"Account Data {account_data} is no dict")
            for account_id, region_data in account_data.items():
                if not isinstance(account_id, str):
                    raise ValueError(f"Account ID {account_id} is no string")
                if not isinstance(region_data, dict):
                    raise ValueError(f"Region Data {region_data} is no dict")
                for region_id, resource_data in region_data.items():
                    if not isinstance(region_id, str):
                        raise ValueError(f"Region ID {region_id} is no string")
                    if not isinstance(resource_data, dict):
                        raise ValueError(f"Resource Data {resource_data} is no dict")
                    for kind, resource_list in resource_data.items():
                        if not isinstance(kind, str):
                            raise ValueError(f"Resource Kind {kind} is no string")
                        if not isinstance(resource_list, list):
                            raise ValueError(f"Resource List {resource_list} is no list")
                        for resource_id in resource_list:
                            if not isinstance(resource_id, str):
                                raise ValueError(f"Resource ID {resource_id} is no string")
        return True
import random
import hashlib
import time
from resotolib.baseresources import BaseResource, VolumeStatus, InstanceStatus
from resotolib.logger import log
from resotolib.baseplugin import BaseCollectorPlugin
from resotolib.graph import Graph
from resotolib.args import ArgumentParser
from resotolib.config import Config
from .config import RandomConfig
from .resources import (
first_names,
purposes,
instance_statuses,
instance_types,
volume_statuses,
region_templates,
RandomAccount,
RandomRegion,
RandomNetwork,
RandomLoadBalancer,
RandomInstance,
RandomVolume,
)
from typing import List, Callable, Dict, Optional, Type
# Pool of fake employee names; (re)filled by add_random_resources() and used by
# add_instance_groups() to pick resource owners.
employees = []
class RandomCollectorPlugin(BaseCollectorPlugin):
    """Collector plugin that fabricates a deterministic random cloud inventory."""

    cloud = "random"

    def collect(self) -> None:
        """This method is being called by resoto whenever the collector runs
        It is responsible for querying the cloud APIs for remote resources and adding
        them to the plugin graph.
        The graph root (self.graph.root) must always be followed by one or more
        accounts. An account must always be followed by a region.
        A region can contain arbitrary resources.
        """
        log.debug("plugin: collecting random resources")
        # Seed with the configured value so repeated runs produce the same
        # inventory; reseed from entropy afterwards to not affect other users
        # of the global `random` module.
        random.seed(Config.random.seed)
        add_random_resources(self.graph)
        random.seed()

    @staticmethod
    def add_args(arg_parser: ArgumentParser) -> None:
        # No plugin-specific command line arguments.
        pass

    @staticmethod
    def add_config(config: Config) -> None:
        config.add_config(RandomConfig)
def get_id(input: str, digest_size: int = 10) -> str:
    """Derive a stable hexadecimal identifier from *input* using BLAKE2b.

    The result is 2 * digest_size hex characters long and deterministic for a
    given input.
    """
    payload = str(input).encode()
    return hashlib.blake2b(payload, digest_size=digest_size).hexdigest()
def add_random_resources(graph: Graph) -> None:
    """Entry point: fill the global employee pool, then populate the graph with accounts."""
    global employees
    # Pool size scales with the configured Config.random.size factor.
    min_employees = round(Config.random.size * 5)
    max_employees = round(Config.random.size * 30)
    employees = random.choices(first_names, k=random.randint(min_employees, max_employees))
    add_accounts(graph)


def add_accounts(graph: Graph) -> None:
    """Add 1-10 accounts under the graph root and populate each with regions."""
    num_accounts = random.randint(1, 10)
    log.debug(f"Adding {num_accounts} accounts")
    for account_num in range(num_accounts):
        # 6-byte hash rendered as a decimal number, mimicking cloud account ids.
        account_id = str(int(get_id(f"account_{account_num}", 6), 16))
        account = RandomAccount(id=account_id, tags={}, name=f"Random Account {account_num}")
        graph.add_resource(graph.root, account)
        add_regions(graph, [account], account=account)
def add_regions(graph: Graph, parents: List[BaseResource], account: BaseResource = None) -> None:
    """Pick a random subset of generated regions for `account` and add them to the graph.

    A global namespace of region names is generated first (num_total_regions
    entries cycling through region_templates with an increasing suffix), then
    a few of them are sampled for this account.
    """
    min_num_total_regions = round(Config.random.size * 10)
    max_num_total_regions = round(Config.random.size * 100)
    num_total_regions = random.randint(min_num_total_regions, max_num_total_regions)
    all_regions = {}
    r_num = 1
    i = 0
    # Build e.g. {"us-east1": "US East 1", ...} until num_total_regions names exist.
    while i < num_total_regions:
        for rt_short, rt_long in region_templates.items():
            r_short = f"{rt_short}{r_num}"
            r_long = f"{rt_long} {r_num}"
            all_regions[r_short] = r_long
            i += 1
            if i >= num_total_regions:
                break
        r_num += 1
    min_num_regions = round(Config.random.size * 1)
    max_num_regions = round(Config.random.size * 4)
    num_regions = random.randint(min_num_regions, max_num_regions)
    # sorted() keeps the sample deterministic for a fixed random seed.
    regions = random.sample(sorted(all_regions), num_regions)
    log.debug(f"Adding {num_regions} regions {regions} in {account.rtdname}")
    for r in regions:
        region = RandomRegion(id=r, tags={}, name=all_regions[r], account=account)
        graph.add_node(region)
        for parent in parents:
            graph.add_edge(parent, region)
        # id_path threads a unique hierarchical key down to child resources.
        id_path = f"{account.id}/{region.id}"
        add_networks(graph, [region], account=account, region=region, id_path=id_path)
def add_networks(
    graph: Graph,
    parents: List[BaseResource],
    id_path: str,
    num: Optional[int] = None,
    account: BaseResource = None,
    region: BaseResource = None,
    kwargs: Optional[Dict] = None,
) -> None:
    """Add 1-4 (scaled) networks under `parents`; each network gets instance groups."""
    add_resources(
        graph=graph,
        parents=parents,
        children=[add_instance_groups],
        cls=RandomNetwork,
        short_prefix="rndnet-",
        long_prefix="Network",
        min=1,
        max=4,
        num=num,
        id_path=id_path,
        account=account,
        region=region,
    )
def add_instance_groups(
    graph: Graph,
    parents: List[BaseResource],
    id_path: str,
    num: Optional[int] = None,
    account: BaseResource = None,
    region: BaseResource = None,
    kwargs: Optional[Dict] = None,
) -> None:
    """Roll shared attributes (status, type, purpose, owner) for a group of
    instances, then delegate the actual creation to add_instances().

    Note: `num_groups` is computed but only logged — every call produces a
    single group; presumably a leftover from an earlier design (TODO confirm).
    """
    min_num_groups = round(Config.random.size * 5)
    max_num_groups = round(Config.random.size * 50)
    num_groups = random.randint(min_num_groups, max_num_groups)
    log.debug(f"Adding {num_groups} instance groups in {region.rtdname}")
    # Weighted so most instances are "running"; weights align with instance_statuses order.
    instance_status = random.choices(instance_statuses, weights=[1, 85, 1, 11, 1, 1], k=1)[0]
    instance_type = random.choices(list(instance_types), weights=[10, 10, 20, 50, 20, 10, 5, 5], k=1)[0]
    tags = {}
    long_prefix = "Instance"
    # purpose is a (cost center, display prefix) pair.
    purpose = random.choice(purposes)
    tags["costCenter"] = purpose[0]
    # ~90% of groups get an owner tag drawn from the employee pool.
    has_owner = random.randrange(100) < 90
    if has_owner:
        owner = random.choice(employees)
        tags["owner"] = owner
        long_prefix = purpose[1]
    kwargs = {
        "tags": tags,
        "instance_status": instance_status,
        "instance_type": instance_type,
        "instance_cores": instance_types[instance_type][0],
        "instance_memory": instance_types[instance_type][1],
    }
    add_instances(
        graph=graph,
        parents=parents,
        id_path=id_path,
        num=num,
        long_prefix=long_prefix,
        account=account,
        region=region,
        kwargs=kwargs,
    )
def add_instances(
    graph: Graph,
    parents: List[BaseResource],
    id_path: str,
    long_prefix: str,
    num: Optional[int] = None,
    account: BaseResource = None,
    region: BaseResource = None,
    kwargs: Optional[Dict] = None,
) -> None:
    """Add 0-50 (scaled) instances under `parents`; each instance gets 1-5 volumes.

    Webserver groups additionally get a load balancer which becomes another
    parent of every instance created here.
    """
    if long_prefix.startswith("Webserver"):
        lb = add_loadbalancer(
            graph=graph, id_path=id_path, parents=parents, account=account, region=region, kwargs=kwargs
        )
        # Fix: previously `parents.append(lb)` mutated the caller's list in
        # place; rebind to a fresh list with the same contents instead.
        parents = [*parents, lb]
    # Attributes shared by the volumes of all instances in this group.
    volume_status = random.choices(volume_statuses, weights=[2, 15, 80, 1, 1, 1], k=1)[0]
    volume_tags = kwargs.get("tags", {})
    volume_size = random.choices([20, 100, 200, 400, 800, 1000, 4000], weights=[70, 40, 30, 5, 5, 20, 1], k=1)[0]
    child_kwargs = {
        "tags": volume_tags,
        "volume_status": volume_status,
        "volume_type": "ssd",
        "volume_size": volume_size,
    }
    # Translate the raw status string rolled in add_instance_groups() into the
    # InstanceStatus enum expected by the resource model.
    instance_status_map: Dict[str, InstanceStatus] = {
        "pending": InstanceStatus.BUSY,
        "running": InstanceStatus.RUNNING,
        "shutting-down": InstanceStatus.BUSY,
        "terminated": InstanceStatus.TERMINATED,
        "stopping": InstanceStatus.BUSY,
        "stopped": InstanceStatus.STOPPED,
    }
    if kwargs:
        kwargs["instance_status"] = instance_status_map.get(kwargs.get("instance_status", ""), InstanceStatus.UNKNOWN)
    add_resources(
        graph=graph,
        parents=parents,
        children=[add_volumes],
        cls=RandomInstance,
        short_prefix="rndi-",
        long_prefix=long_prefix,
        min=0,
        max=50,
        num=num,
        num_children=random.randint(1, 5),
        jitter=int(time.time() % 3),
        id_path=id_path,
        account=account,
        region=region,
        kwargs=kwargs,
        child_kwargs=child_kwargs,
    )
def add_volumes(
    graph: Graph,
    parents: List[BaseResource],
    id_path: str,
    num: Optional[int] = None,
    account: BaseResource = None,
    region: BaseResource = None,
    kwargs: Optional[Dict] = None,
) -> None:
    """Add 1-5 (scaled) volumes under `parents`.

    Translates the raw status string from add_instances() into the
    VolumeStatus enum expected by the resource model.
    """
    volume_status_map: Dict[str, VolumeStatus] = {
        "creating": VolumeStatus.BUSY,
        "available": VolumeStatus.AVAILABLE,
        "in-use": VolumeStatus.IN_USE,
        "deleting": VolumeStatus.BUSY,
        "deleted": VolumeStatus.DELETED,
        "error": VolumeStatus.ERROR,
        "busy": VolumeStatus.BUSY,
    }
    if kwargs:
        kwargs["volume_status"] = volume_status_map.get(kwargs.get("volume_status", ""), VolumeStatus.UNKNOWN)
    add_resources(
        graph=graph,
        parents=parents,
        children=[],
        cls=RandomVolume,
        short_prefix="rndvol-",
        long_prefix="Volume",
        min=1,
        max=5,
        num=num,
        id_path=id_path,
        account=account,
        region=region,
        kwargs=kwargs,
    )
def add_resources(
    graph: Graph,
    parents: List[BaseResource],
    children: List[Callable],
    cls: Type[BaseResource],
    short_prefix: str,
    long_prefix: str,
    # `min`/`max` shadow the builtins, but callers pass them by keyword, so
    # renaming would break the interface.
    min: int,
    max: int,
    id_path: str,
    jitter: int = 0,
    num: Optional[int] = None,
    num_children: Optional[int] = None,
    account: BaseResource = None,
    region: BaseResource = None,
    kwargs: Optional[Dict] = None,
    child_kwargs: Optional[Dict] = None,
) -> None:
    """Generic resource fabricator.

    Creates either exactly `num` or a scaled random count (between min and max,
    plus jitter) of `cls` instances, links each one to every parent, and then
    invokes each `children` callback to populate resources underneath it.
    Resource ids are deterministic hashes of their hierarchical id_path.
    """
    if kwargs is None:
        kwargs = {"tags": {}}
    if num:
        num_resources = num
    else:
        min = round(Config.random.size * min)
        max = round(Config.random.size * max)
        num_resources = random.randint(min, max) + jitter
    log.debug(
        f"Adding {num_resources} {long_prefix} resources in {account.rtdname} {region.rtdname} with"
        f" parents: {parents}, children: {children}"
    )
    for resource_num in range(num_resources):
        resource_id_path = f"{id_path}/{short_prefix}{resource_num}"
        log.debug(f"Adding {long_prefix} {resource_num} resource ({id_path})")
        resource_id = short_prefix + get_id(resource_id_path)
        name = f"{long_prefix} {resource_num}"
        resource = cls(id=resource_id, name=name, account=account, region=region, **kwargs)
        graph.add_node(resource)
        for parent in parents:
            graph.add_edge(parent, resource)
        # Children hang off the new resource, and additionally off the region
        # itself (unless the resource *is* the region).
        child_parents = [resource]
        if region != resource:
            child_parents.append(region)
        for child in children:
            child(
                graph=graph,
                parents=child_parents,
                id_path=resource_id_path,
                account=account,
                region=region,
                num=num_children,
                kwargs=child_kwargs,
            )
def add_loadbalancer(
    graph: Graph,
    id_path: str,
    parents: List[BaseResource],
    account: BaseResource = None,
    region: BaseResource = None,
    kwargs: Optional[Dict] = None,
) -> BaseResource:
    """Create a single load balancer under `parents` and return it.

    Tags are inherited from kwargs["tags"] when present.
    """
    resource_id_path = f"{id_path}/lb"
    log.debug(f"Adding load balancer resource ({id_path}) ({kwargs})")
    tags = kwargs.get("tags", {}) if kwargs is not None else {}
    lb = RandomLoadBalancer(
        id="rndlb-" + get_id(resource_id_path),
        tags=tags,
        name="LoadBalancer",
        account=account,
        region=region,
    )
    graph.add_node(lb)
    for parent in parents:
        graph.add_edge(parent, lb)
    return lb
from resotolib.logger import log
from attrs import define
from typing import ClassVar
from resotolib.graph import Graph
from resotolib.baseresources import (
BaseAccount,
BaseRegion,
BaseInstance,
BaseNetwork,
BaseVolume,
BaseLoadBalancer,
)
@define(eq=False, slots=False)
class RandomAccount(BaseAccount):
    """A fabricated cloud account."""

    kind: ClassVar[str] = "random_account"

    def delete(self, graph: Graph) -> bool:
        # Accounts cannot be deleted, so NotImplemented is returned.
        return NotImplemented


@define(eq=False, slots=False)
class RandomRegion(BaseRegion):
    """A fabricated cloud region."""

    kind: ClassVar[str] = "random_region"

    def delete(self, graph: Graph) -> bool:
        """Regions can usually not be deleted so we return NotImplemented"""
        return NotImplemented
@define(eq=False, slots=False)
class RandomResource:
    """A class that implements the abstract method delete() as well as update_tag()
    and delete_tag().
    delete() must be implemented. update_tag() and delete_tag() are optional.

    Since these resources are fabricated, all operations merely log and report
    success.
    """

    kind: ClassVar[str] = "random_resource"

    def delete(self, graph: Graph) -> bool:
        """Delete a resource in the cloud"""
        log.debug(f"Deleting resource {self.id} in account {self.account(graph).id} region {self.region(graph).id}")
        return True

    def update_tag(self, key, value) -> bool:
        """Update a resource tag in the cloud"""
        log.debug(f"Updating or setting tag {key}: {value} on resource {self.id}")
        return True

    def delete_tag(self, key) -> bool:
        """Delete a resource tag in the cloud"""
        log.debug(f"Deleting tag {key} on resource {self.id}")
        return True
# Concrete fabricated resource kinds: RandomResource supplies the (no-op)
# delete/tag operations, the Base* classes supply the resoto data model.
@define(eq=False, slots=False)
class RandomInstance(RandomResource, BaseInstance):
    kind: ClassVar[str] = "random_instance"


@define(eq=False, slots=False)
class RandomVolume(RandomResource, BaseVolume):
    kind: ClassVar[str] = "random_volume"


@define(eq=False, slots=False)
class RandomNetwork(RandomResource, BaseNetwork):
    kind: ClassVar[str] = "random_network"


@define(eq=False, slots=False)
class RandomLoadBalancer(RandomResource, BaseLoadBalancer):
    kind: ClassVar[str] = "random_load_balancer"
# Pool of human first names used to generate random resource names.
first_names = [
    "Aada", "Aaliyah", "Aarav", "Aaron", "Aaryan", "Abhinav", "Adele", "Aidan",
    "Aino", "Alberto", "Aleksi", "Alexey", "Alice", "Alva", "Alvaro", "Amelia",
    "Ananya", "Anastasia", "Andrei", "Angeliki", "Anika", "Anja", "Anna", "Anni",
    "Anton", "Antonia", "Aoi", "Aradhya", "Aria", "Ariel", "Arnav", "Arthur",
    "Arttu", "Artyom", "Athanasios", "Ava", "Avery", "Beatriz", "Birta", "Bjarni",
    "Bjorn", "Camille", "Carlos", "Catalina", "Chloe", "Chris", "Clara", "Constanza",
    "Cora", "Cristobal", "Daiki", "Daniela", "Devansh", "Dhruv", "Diego", "Dimitrios",
    "Dmitry", "Doris", "Dylan", "Eetu", "Einar", "Eleni", "Elias", "Elin",
    "Ella", "Elzbieta", "Emil", "Enzo", "Ethan", "Evegny", "Ewa", "Felix",
    "Fernanda", "Flo", "Grace", "Graciela", "Gunnar", "Guorun", "Hannah", "Harini",
    "Helga", "Hugo", "Ida", "Ines", "Ioannis", "Irina", "Isak", "Ishan",
    "Itai", "Ivan", "Jade", "Jan", "Janice", "Javiera", "Jocelyn", "Joel",
    "John", "Jonas", "Jorge", "Jose", "Juan", "Juho", "Julia", "Julian",
    "Kaito", "Katarzyna", "Kenta", "Kostantina", "Krzysztof", "Lars", "Lauri", "Layla",
    "Lea", "Leevi", "Lena", "Leon", "Leonie", "Lida", "Luca", "Lucas",
    "Lucia", "Lukas", "Magnus", "Maja", "Manon", "Marcin", "Marek", "Maricel",
    "Marie", "Mario", "Marlon", "Marta", "Martina", "Matthias", "Maxime", "Maximilian",
    "Mia", "Mikhail", "Milan", "Misaki", "Mitsuki", "Miu", "Miyu", "Moe",
    "Mohamed", "Nanami", "Naom", "Natalia", "Nathan", "Navita", "Navya", "Nikita",
    "Noah", "Norma", "Oceana", "Olafur", "Olga", "Oliver", "Oscar", "Pablo",
    "Paula", "Pranav", "Ren", "Ricardo", "Ridhi", "Riko", "Rin", "Rishika",
    "Ronald", "Rosa", "Rowena", "Raffa", "Ryan", "Sanvi", "Sarah", "Sergei",
    "Shaurya", "Sho", "Shun", "Sigurour", "Siiri", "Silvia", "Simon", "Sota",
    "Takumi", "Tamar", "Tatiana", "Tejas", "Telma", "Tim", "Tobi", "Tomasz",
    "Trisha", "Ulhas", "Valentina", "Valeria", "Vanessa", "Veeti", "Venla", "Vicente",
    "Yael", "Yu", "Yulia", "Zoe", "Zoey", "Zofia",
]
# [short tag, display name] pairs used to label randomly generated resources.
purposes = [
    ["bus", "Business"],
    ["edu", "Education"],
    ["ent", "Entertainment"],
    ["fin", "Finance"],
    ["game", "Gaming"],
    ["gov", "Government"],
    ["news", "News"],
    ["office", "Office"],
    ["misc", "Other"],
    ["priv", "Personal"],
    ["shop", "Shopping"],
    ["soc", "Social"],
    ["sprt", "Sports"],
    ["trvl", "Travel"],
    ["wrk", "Work"],
    ["dev", "Development"],
    ["res", "Research"],
    ["web", "Webserver"],
    ["db", "Database"],
    ["stor", "Storage"],
    ["cloud", "Cloud"],
    ["host", "Hosting"],
]
# Lifecycle states a generated instance can be in (mirrors EC2-style states).
instance_statuses = ["pending", "running", "shutting-down", "terminated", "stopping", "stopped"]
# Fictional instance type -> [cores, memory]; memory presumably in GB — confirm against plugin usage.
instance_types = {
    "rnd2.tiny": [2, 2],
    "rnd2.micro": [2, 4],
    "rnd2.medium": [4, 8],
    "rnd2.large": [8, 16],
    "rnd2.xlarge": [8, 32],
    "rnd2.2xlarge": [16, 64],
    "rnd2.mega": [32, 128],
    "rnd2.ultra": [64, 256],
}
# Lifecycle states a generated volume can be in (mirrors EBS-style states).
volume_statuses = ["creating", "available", "in-use", "deleting", "deleted", "error"]
# Region name prefix -> human readable description; a numeric suffix is
# appended to the prefix to form concrete region ids (e.g. "us-east-1").
# Fixed: the closing line previously carried trailing extraction garbage
# ("| /path | ... | pypi |") that was not valid Python.
region_templates = {
    "ap-northeast-": "Asia Pacific North East",
    "ap-southeast-": "Asia Pacific South East",
    "ap-south-": "Asia Pacific South",
    "ca-central-": "Canada Central",
    "eu-central-": "EU Central",
    "eu-north-": "EU North",
    "eu-west-": "EU West",
    "sa-east-": "South America East",
    "us-east-": "US East",
    "us-west-": "US West",
}
from datetime import datetime
from attrs import define
from typing import Optional, ClassVar, Dict
from resotolib.graph import Graph
from resotolib.baseresources import (
BaseAccount,
BaseResource,
)
@define(eq=False, slots=False)
class ScarfResource:
    """Mixin for Scarf resources: Scarf objects are read-only from Resoto's
    point of view, so all mutation hooks report failure."""

    kind: ClassVar[str] = "scarf_resource"

    def delete(self, graph: Graph) -> bool:
        # Deletion via Resoto is not supported for Scarf resources.
        return False

    def update_tag(self, key, value) -> bool:
        return False

    def delete_tag(self, key) -> bool:
        return False
@define(eq=False, slots=False)
class ScarfOrganization(ScarfResource, BaseAccount):
    """A Scarf organization, mapped to the account node of the graph."""

    kind: ClassVar[str] = "scarf_organization"
    description: Optional[str] = None
    billing_email: Optional[str] = None
    website: Optional[str] = None

    @staticmethod
    def new(data: Dict) -> BaseResource:
        """Build a ScarfOrganization from a raw API dict.

        The organization name doubles as its id; timestamps that are missing
        or malformed become None via convert_date().
        """
        return ScarfOrganization(
            id=data.get("name"),
            description=data.get("description"),
            website=data.get("website"),
            billing_email=data.get("billingEmail"),
            ctime=convert_date(data.get("createdAt")),
            mtime=convert_date(data.get("updatedAt")),
        )
@define(eq=False, slots=False)
class ScarfPackage(ScarfResource, BaseResource):
    """A package tracked by Scarf."""

    kind: ClassVar[str] = "scarf_package"
    short_description: Optional[str] = None
    long_description: Optional[str] = None
    website: Optional[str] = None
    library_type: Optional[str] = None
    owner: Optional[str] = None
    # Download counter; populated elsewhere, defaults to 0 on creation.
    pull_count: int = 0

    @staticmethod
    def new(data: Dict) -> BaseResource:
        """Build a ScarfPackage from a raw API dict, using its uuid as id."""
        owner = data.get("owner", "")
        name = data.get("name", "")
        # API package names may be prefixed "owner/name"; strip the owner part
        # so the graph shows the bare package name.
        owner_prefix = f"{owner}/" if owner else ""
        if name.startswith(owner_prefix):
            name = name[len(owner_prefix) :]
        return ScarfPackage(
            id=data.get("uuid"),
            name=name,
            short_description=data.get("shortDescription"),
            long_description=data.get("longDescription"),
            website=data.get("website"),
            library_type=data.get("libraryType"),
            owner=owner,
            ctime=convert_date(data.get("createdAt")),
        )
def convert_date(date_str: Optional[str]) -> Optional[datetime]:
    """Parse a UTC timestamp like ``2021-01-01T12:00:00.000Z``.

    Callers pass values straight from API responses (``data.get(...)``), so the
    input may be None. The original only caught ValueError and crashed with a
    TypeError on None input; treat missing input and bad format the same way.

    Returns the parsed datetime, or None if the input is missing or malformed.
    """
    if not date_str:
        return None
    try:
        return datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S.%fZ")
    except ValueError:
        return None
import time
from typing import Dict, ClassVar, List, Optional
from datetime import datetime
from resotolib.baseresources import (
BaseAccount,
BaseRegion,
BaseUser,
BaseGroup,
BaseResource,
ModelReference,
)
from attrs import define, field
@define(eq=False, slots=False)
class SlackResource:
    """Mixin for Slack resources; deletion through Resoto is not supported."""

    kind: ClassVar[str] = "slack_resource"

    def delete(self, graph) -> bool:
        return False
@define(eq=False, slots=False)
class SlackTeam(SlackResource, BaseAccount):
    """A Slack workspace (team); forms the account node of the graph."""

    kind: ClassVar[str] = "slack_team"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["slack_region"],
            "delete": [],
        }
    }
    # Annotations corrected to Optional[str]: all of these default to None.
    domain: Optional[str] = None
    email_domain: Optional[str] = None
    icon: Optional[str] = None

    @staticmethod
    def new(team: Dict) -> BaseAccount:
        """Build a SlackTeam from a raw Slack API team dict."""
        return SlackTeam(
            id=team.get("id"),
            tags={},
            name=team.get("name"),
            domain=team.get("domain"),
            email_domain=team.get("email_domain"),
            icon=team.get("icon", {}).get("image_original"),
        )
@define(eq=False, slots=False)
class SlackRegion(SlackResource, BaseRegion):
    """Pseudo-region grouping users, usergroups and conversations of a team."""

    # Annotated as ClassVar[str] for consistency with the sibling classes.
    kind: ClassVar[str] = "slack_region"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["slack_usergroup", "slack_user", "slack_conversation"],
            "delete": [],
        }
    }
@define(eq=False, slots=False)
class SlackUser(SlackResource, BaseUser):
    """A Slack workspace member, built from the `users.list` style API dict."""

    kind: ClassVar[str] = "slack_user"
    real_name: Optional[str] = None
    team_id: Optional[str] = None
    # Boolean annotations corrected to Optional[bool]: they default to None.
    deleted: Optional[bool] = None
    color: Optional[str] = None
    tz: Optional[str] = None
    tz_label: Optional[str] = None
    tz_offset: Optional[int] = None
    is_admin: Optional[bool] = None
    is_app_user: Optional[bool] = None
    is_bot: Optional[bool] = None
    is_owner: Optional[bool] = None
    is_primary_owner: Optional[bool] = None
    is_restricted: Optional[bool] = None
    is_ultra_restricted: Optional[bool] = None
    email: Optional[str] = None
    phone: Optional[str] = None
    status_emoji: Optional[str] = None
    status_expiration: Optional[int] = None
    status_text: Optional[str] = None
    status_text_canonical: Optional[str] = None
    title: Optional[str] = None
    guest_invited_by: Optional[str] = None
    first_name: Optional[str] = None
    last_name: Optional[str] = None
    skype: Optional[str] = None
    display_name: Optional[str] = None
    display_name_normalized: Optional[str] = None
    image_24: Optional[str] = None
    image_32: Optional[str] = None
    image_48: Optional[str] = None
    image_72: Optional[str] = None
    image_192: Optional[str] = None
    image_512: Optional[str] = None
    real_name_normalized: Optional[str] = None

    @staticmethod
    def new(member: Dict) -> BaseUser:
        """Build a SlackUser from a raw API member dict.

        The API only exposes an "updated" timestamp, so it is used for both
        ctime and mtime (falling back to the current time when absent).
        """
        profile = member.get("profile", {})
        mtime = datetime.fromtimestamp(member.get("updated", time.time()))
        display_name = profile.get("display_name")
        return SlackUser(
            id=member.get("id"),
            tags={},
            real_name=member.get("real_name"),
            team_id=member.get("team_id"),
            deleted=member.get("deleted"),
            color=member.get("color"),
            tz=member.get("tz"),
            tz_label=member.get("tz_label"),
            tz_offset=member.get("tz_offset"),
            is_admin=member.get("is_admin", False),
            is_app_user=member.get("is_app_user", False),
            is_bot=member.get("is_bot", False),
            is_owner=member.get("is_owner", False),
            is_primary_owner=member.get("is_primary_owner", False),
            is_restricted=member.get("is_restricted", False),
            is_ultra_restricted=member.get("is_ultra_restricted", False),
            mtime=mtime,
            ctime=mtime,
            email=profile.get("email"),
            phone=profile.get("phone"),
            status_emoji=profile.get("status_emoji"),
            status_expiration=profile.get("status_expiration"),
            status_text=profile.get("status_text"),
            status_text_canonical=profile.get("status_text_canonical"),
            title=profile.get("title"),
            guest_invited_by=profile.get("guest_invited_by"),
            first_name=profile.get("first_name"),
            last_name=profile.get("last_name"),
            skype=profile.get("skype"),
            display_name=display_name,
            name=display_name,
            display_name_normalized=profile.get("display_name_normalized"),
            image_24=profile.get("image_24"),
            image_32=profile.get("image_32"),
            image_48=profile.get("image_48"),
            image_72=profile.get("image_72"),
            image_192=profile.get("image_192"),
            image_512=profile.get("image_512"),
            real_name_normalized=profile.get("real_name_normalized"),
        )
@define(eq=False, slots=False)
class SlackUsergroup(SlackResource, BaseGroup):
    """A Slack usergroup (subteam), built from a `usergroups.list` API dict."""

    kind: ClassVar[str] = "slack_usergroup"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["slack_user"],
            "delete": [],
        }
    }
    # Boolean annotations corrected to Optional[bool]: they default to None.
    auto_provision: Optional[bool] = None
    auto_type: Optional[str] = None
    created_by: Optional[str] = None
    description: Optional[str] = None
    enterprise_subteam_id: Optional[str] = None
    handle: Optional[str] = None
    is_external: Optional[bool] = None
    is_subteam: Optional[bool] = None
    is_usergroup: Optional[bool] = None
    team_id: Optional[str] = None
    updated_by: Optional[str] = None
    user_count: Optional[int] = None
    # Raw id lists used later for edge creation; excluded from repr.
    _users: List = field(factory=list, repr=False)
    _channels: List = field(factory=list, repr=False)
    _groups: List = field(factory=list, repr=False)

    @staticmethod
    def new(usergroup: Dict) -> BaseGroup:
        """Build a SlackUsergroup from a raw API usergroup dict."""
        prefs = usergroup.get("prefs", {})
        return SlackUsergroup(
            id=usergroup.get("id"),
            name=usergroup.get("name"),
            auto_provision=usergroup.get("auto_provision", False),
            auto_type=usergroup.get("auto_type"),
            created_by=usergroup.get("created_by"),
            description=usergroup.get("description"),
            enterprise_subteam_id=usergroup.get("enterprise_subteam_id"),
            handle=usergroup.get("handle"),
            is_external=usergroup.get("is_external", False),
            is_subteam=usergroup.get("is_subteam", False),
            is_usergroup=usergroup.get("is_usergroup", False),
            team_id=usergroup.get("team_id"),
            updated_by=usergroup.get("updated_by"),
            user_count=usergroup.get("user_count"),
            ctime=datetime.fromtimestamp(usergroup.get("date_create", time.time())),
            mtime=datetime.fromtimestamp(usergroup.get("date_update", time.time())),
            # Fixed: attrs strips the leading underscore of private attributes
            # in the generated __init__, so the keyword arguments are
            # users/channels/groups — passing _users etc. raised TypeError.
            users=usergroup.get("users", []),
            channels=prefs.get("channels", []),
            groups=prefs.get("groups", []),
        )
@define(eq=False, slots=False)
class SlackConversation(SlackResource, BaseResource):
    """A Slack conversation (channel, group, IM or MPIM).

    Fixed: the closing line previously carried trailing extraction garbage
    ("| /path | ... | pypi |"); boolean/list annotations corrected to Optional
    since they default to None.
    """

    kind: ClassVar[str] = "slack_conversation"
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["slack_user"],
            "delete": [],
        }
    }
    creator: Optional[str] = None
    is_archived: Optional[bool] = None
    is_channel: Optional[bool] = None
    is_ext_shared: Optional[bool] = None
    is_general: Optional[bool] = None
    is_group: Optional[bool] = None
    is_im: Optional[bool] = None
    is_member: Optional[bool] = None
    is_mpim: Optional[bool] = None
    is_org_shared: Optional[bool] = None
    is_pending_ext_shared: Optional[bool] = None
    is_private: Optional[bool] = None
    is_shared: Optional[bool] = None
    name_normalized: Optional[str] = None
    num_members: Optional[int] = None
    parent_conversation: Optional[str] = None
    pending_connected_team_ids: Optional[List[str]] = None
    pending_shared: List[str] = field(factory=list)
    previous_names: List[str] = field(factory=list)
    shared_team_ids: List[str] = field(factory=list)
    unlinked: Optional[int] = None
    topic: Optional[str] = None
    topic_creator: Optional[str] = None
    topic_last_set: Optional[int] = None
    purpose: Optional[str] = None
    purpose_creator: Optional[str] = None
    purpose_last_set: Optional[int] = None

    @staticmethod
    def new(channel: Dict) -> BaseResource:
        """Build a SlackConversation from a raw `conversations.list` API dict."""
        topic = channel.get("topic", {})
        purpose = channel.get("purpose", {})
        return SlackConversation(
            id=channel.get("id"),
            name=channel.get("name"),
            creator=channel.get("creator"),
            is_archived=channel.get("is_archived", False),
            is_channel=channel.get("is_channel", False),
            is_ext_shared=channel.get("is_ext_shared", False),
            is_general=channel.get("is_general", False),
            is_group=channel.get("is_group", False),
            is_im=channel.get("is_im", False),
            is_member=channel.get("is_member", False),
            is_mpim=channel.get("is_mpim", False),
            is_org_shared=channel.get("is_org_shared", False),
            is_pending_ext_shared=channel.get("is_pending_ext_shared", False),
            is_private=channel.get("is_private", False),
            is_shared=channel.get("is_shared", False),
            name_normalized=channel.get("name_normalized"),
            num_members=channel.get("num_members"),
            parent_conversation=channel.get("parent_conversation"),
            pending_connected_team_ids=channel.get("pending_connected_team_ids", []),
            pending_shared=channel.get("pending_shared", []),
            previous_names=channel.get("previous_names", []),
            shared_team_ids=channel.get("shared_team_ids", []),
            unlinked=channel.get("unlinked"),
            topic=topic.get("value", ""),
            topic_creator=topic.get("creator"),
            topic_last_set=topic.get("last_set"),
            purpose=purpose.get("value", ""),
            purpose_creator=purpose.get("creator"),
            purpose_last_set=purpose.get("last_set"),
        )
from typing import ClassVar
from attrs import define, field
from resotolib.json import value_in_path
from resotolib.types import Json
# Example/default tagvalidator configuration shipped with the plugin:
# - "default": fallback settings (e.g. expiration) for accounts without their own
# - "kinds": resource kinds whose tags are validated
# - "accounts": per-cloud account ids with name and optional expiration override
default_config: Json = {
    "default": {"expiration": "24h"},
    "kinds": [
        "aws_ec2_instance",
        "aws_vpc",
        "aws_cloudformation_stack",
        "aws_elb",
        "aws_alb",
        "aws_alb_target_group",
        "aws_eks_cluster",
        "aws_eks_nodegroup",
        "aws_ec2_nat_gateway",
    ],
    "accounts": {
        "aws": {
            "123465706934": {"name": "eng-audit"},
            "123479172032": {"name": "eng-devprod"},
            "123453451782": {"name": "sales-lead-gen", "expiration": "12h"},
            "123415487488": {"name": "sales-hosted-lead-gen", "expiration": "8d"},
        },
    },
}
@define
class TagValidatorConfig:
    """Configuration schema of the tagvalidator plugin."""

    kind: ClassVar[str] = "plugin_tagvalidator"
    enabled: bool = field(
        default=False,
        metadata={"description": "Enable plugin?", "restart_required": True},
    )
    dry_run: bool = field(
        default=False,
        metadata={"description": "Dry run"},
    )
    config: Json = field(
        factory=lambda: default_config,
        metadata={
            "description": (
                "Configuration for the plugin\n"
                "See https://github.com/someengineering/resoto/tree/main/plugins/tagvalidator for syntax details"
            )
        },
    )

    @staticmethod
    def validate(cfg: "TagValidatorConfig") -> bool:
        """Validate the plugin configuration.

        Raises ValueError describing the first structural problem found;
        returns True when the config is valid.
        """
        config = cfg.config
        required_sections = ["kinds", "accounts"]
        for section in required_sections:
            if section not in config:
                raise ValueError(f"Section '{section}' not found in config")
        if not isinstance(config["kinds"], list) or len(config["kinds"]) == 0:
            raise ValueError("Error in 'kinds' section")
        if not isinstance(config["accounts"], dict) or len(config["accounts"]) == 0:
            raise ValueError("Error in 'accounts' section")
        # Fallback expiration used when an account defines none of its own.
        maybe_default_expiration = value_in_path(config, ["default", "expiration"])
        for cloud_id, account in config["accounts"].items():
            for account_id, account_data in account.items():
                if "name" not in account_data:
                    # Fixed: the message was missing its closing quote.
                    raise ValueError(f"Missing 'name' for account '{cloud_id}/{account_id}'")
                if account_data.get("expiration") is None and maybe_default_expiration is None:
                    # Fixed: the two string parts were concatenated without a
                    # separating space ("...'and no default...").
                    raise ValueError(
                        f"Missing 'expiration' for account '{cloud_id}/{account_id}' "
                        "and no default expiration defined"
                    )
        return True
from resotolib.graph import Graph
import resotolib.logger
from resotolib.baseresources import (
BaseResource,
BaseAccount,
BaseRegion,
BaseZone,
BaseInstance,
)
from attrs import define
from typing import ClassVar
from pyVmomi import vim
from .vsphere_client import get_vsphere_client, VSphereClient
log = resotolib.logger.getLogger("resoto." + __name__)
@define(eq=False, slots=False)
class VSphereHost(BaseAccount):
    """The vSphere endpoint collected from; the account node of the graph."""

    kind: ClassVar[str] = "vsphere_host"

    def delete(self, graph: Graph) -> bool:
        # Accounts cannot be deleted through Resoto.
        return NotImplemented
@define(eq=False, slots=False)
class VSphereDataCenter(BaseRegion):
    """A vSphere datacenter, mapped to the region node of the graph."""

    kind: ClassVar[str] = "vsphere_data_center"

    def delete(self, graph: Graph) -> bool:
        # Datacenters cannot be deleted through Resoto.
        return NotImplemented
@define(eq=False, slots=False)
class VSphereCluster(BaseZone):
    """A vSphere compute cluster, mapped to the zone node of the graph."""

    kind: ClassVar[str] = "vsphere_cluster"

    def delete(self, graph: Graph) -> bool:
        # Clusters cannot be deleted through Resoto.
        return NotImplemented
@define(eq=False, slots=False)
class VSphereESXiHost(BaseResource):
    """An ESXi hypervisor host."""

    kind: ClassVar[str] = "vsphere_esxi_host"

    def delete(self, graph: Graph) -> bool:
        # ESXi hosts cannot be deleted through Resoto.
        return NotImplemented
@define(eq=False, slots=False)
class VSphereDataStore(BaseResource):
    """A vSphere datastore."""

    kind: ClassVar[str] = "vsphere_datastore"

    def delete(self, graph: Graph) -> bool:
        # Datastores cannot be deleted through Resoto.
        return NotImplemented
@define(eq=False, slots=False)
class VSphereDataStoreCluster(BaseResource):
    """A vSphere datastore cluster."""

    kind: ClassVar[str] = "vsphere_datastore_cluster"

    def delete(self, graph: Graph) -> bool:
        # Datastore clusters cannot be deleted through Resoto.
        return NotImplemented
@define(eq=False, slots=False)
class VSphereResourcePool(BaseResource):
    """A vSphere resource pool."""

    kind: ClassVar[str] = "vsphere_resource_pool"

    def delete(self, graph: Graph) -> bool:
        # Resource pools cannot be deleted through Resoto.
        return NotImplemented
@define(eq=False, slots=False)
class VSphereResource:
    """Mixin providing access to the shared vSphere API client."""

    kind: ClassVar[str] = "vsphere_resource"

    def _vsphere_client(self) -> VSphereClient:
        return get_vsphere_client()
@define(eq=False, slots=False)
class VSphereInstance(BaseInstance, VSphereResource):
    """A virtual machine managed by vSphere."""

    kind: ClassVar[str] = "vsphere_instance"

    def _vm(self):
        # Looked up by name on every call; may return None if the VM is gone.
        return self._vsphere_client().get_object([vim.VirtualMachine], self.name)

    def delete(self, graph: Graph) -> bool:
        """Power the VM off if needed, then destroy it.

        Returns True on success, False when the VM cannot be found.
        """
        # Fixed: the VM was looked up repeatedly and, when missing, the error
        # was only logged before dereferencing None (AttributeError).
        vm = self._vm()
        if vm is None:
            log.error(f"Could not find vm name {self.name} with id {self.id}")
            return False
        log.debug(f"Deleting resource {self.id} in account {self.account(graph).id} region {self.region(graph).id}")
        if vm.runtime.powerState == "poweredOn":
            # A powered-on VM must be shut down before it can be destroyed.
            task = vm.PowerOffVM_Task()
            self._vsphere_client().wait_for_tasks([task])
            log.debug(f"Task finished - state: {task.info.state}")
        log.info(f"Destroying VM {self.id} with name {self.name}")
        task = vm.Destroy_Task()
        self._vsphere_client().wait_for_tasks([task])
        log.debug(f"Task finished - state: {task.info.state}")
        return True

    def update_tag(self, key, value) -> bool:
        """Set a custom attribute on the VM (vSphere's tag equivalent here)."""
        log.debug(f"Updating or setting tag {key}: {value} on resource {self.id}")
        self._vm().setCustomValue(key, value)
        return True

    def delete_tag(self, key) -> bool:
        """Clear a custom attribute by setting it to the empty string."""
        log.debug(f"Deleting tag {key} on resource {self.id}")
        self._vm().setCustomValue(key, "")
        return True
@define(eq=False, slots=False)
class VSphereTemplate(BaseResource, VSphereResource):
    """A vSphere VM template; converted back to a VM before destruction."""

    kind: ClassVar[str] = "vsphere_template"

    def _get_default_resource_pool(self) -> vim.ResourcePool:
        return self._vsphere_client().get_object([vim.ResourcePool], "Resources")

    def _template(self):
        # Looked up by name; may return None if the template no longer exists.
        return self._vsphere_client().get_object([vim.VirtualMachine], self.name)

    def delete(self, graph: Graph) -> bool:
        """Convert the template to a VM and destroy it.

        Returns True on success (or if the template already vanished),
        False when it cannot be found or conversion fails unexpectedly.
        """
        # Fixed: the template was looked up repeatedly and, when missing, the
        # error was only logged before dereferencing None (AttributeError).
        # Also removed trailing extraction garbage from the last line.
        template = self._template()
        if template is None:
            log.error(f"Could not find vm name {self.name} with id {self.id}")
            return False
        log.debug(f"Deleting resource {self.id} in account {self.account(graph).id} region {self.region(graph).id}")
        log.debug(f"Mark template {self.id} as vm")
        try:
            # Templates cannot be destroyed directly: convert to a VM first.
            template.MarkAsVirtualMachine(host=None, pool=self._get_default_resource_pool())
        except vim.fault.NotFound:
            log.warning(f"Template {self.name} ({self.id}) not found - expecting we're done")
            return True
        except Exception as e:
            log.exception(f"Unexpected error: {e}")
            return False
        log.info(f"Destroying Template {self.id} with name {self.name}")
        task = template.Destroy_Task()
        self._vsphere_client().wait_for_tasks([task])
        log.debug(f"Task finished - state: {task.info.state}")
        return True

    def update_tag(self, key, value) -> bool:
        # Tag operations are not supported on templates.
        return NotImplemented

    def delete_tag(self, key) -> bool:
        return NotImplemented
# `resotometrics`
Resoto Prometheus exporter
## Table of contents
* [Overview](#overview)
* [Usage](#usage)
* [Details](#details)
* [Example](#example)
* [Taking it one step further](#taking-it-one-step-further)
* [Contact](#contact)
* [License](#license)
## Overview
`resotometrics` takes [`resotocore`](../resotocore/) graph data and runs aggregation functions on it. Those aggregated metrics
are then exposed in a [Prometheus](https://prometheus.io/) compatible format. The default TCP port is `9955` but
can be changed using the `resotometrics.web_port` config attribute.
More information can be found below and in [the docs](https://resoto.com/docs/concepts/components/metrics).
## Usage
`resotometrics` uses the following commandline arguments:
```
--subscriber-id SUBSCRIBER_ID
Unique subscriber ID (default: resoto.metrics)
--override CONFIG_OVERRIDE [CONFIG_OVERRIDE ...]
Override config attribute(s)
--resotocore-uri RESOTOCORE_URI
resotocore URI (default: https://localhost:8900)
--verbose, -v Verbose logging
--quiet Only log errors
--psk PSK Pre-shared key
--ca-cert CA_CERT Path to custom CA certificate file
--cert CERT Path to custom certificate file
--cert-key CERT_KEY Path to custom certificate key file
--cert-key-pass CERT_KEY_PASS
Passphrase for certificate key file
--no-verify-certs Turn off certificate verification
```
ENV Prefix: `RESOTOMETRICS_`
Every CLI arg can also be specified using ENV variables.
For instance the boolean `--verbose` would become `RESOTOMETRICS_VERBOSE=true`.
Once started `resotometrics` will register for `generate_metrics` core events. When such an event is received it will
generate Resoto metrics and provide them at the `/metrics` endpoint.
A prometheus config could look like this:
```
scrape_configs:
- job_name: "resotometrics"
static_configs:
- targets: ["localhost:9955"]
```
## Details
Resoto core supports aggregated queries to produce metrics. Our common library [`resotolib`](../resotolib/) defines a number of base resources that are common to a lot of cloud providers, like say compute instances, subnets, routers, load balancers, and so on. All of those ship with a standard set of metrics specific to each resource.
For example, instances have CPU cores and memory, so they define default metrics for those attributes. Right now metrics are hard coded and read from the base resources, but future versions of Resoto will allow you to define your own metrics in `resotocore` and have `resotometrics` export them.
For right now you can use the aggregate API at `{resotocore}:8900/graph/{graph}/reported/search/aggregate` or the `aggregate` CLI command to generate your own metrics. For API details check out the `resotocore` API documentation as well as the Swagger UI at `{resotocore}:8900/api-doc/`.
In the following we will be using the Resoto shell `resh` and the `aggregate` command.
### Example
Enter the following commands into `resh`
```
search is(instance) | aggregate /ancestors.cloud.reported.name as cloud, /ancestors.account.reported.name as account, /ancestors.region.reported.name as region, instance_type as type : sum(1) as instances_total, sum(instance_cores) as cores_total, sum(instance_memory*1024*1024*1024) as memory_bytes
```
Here is the same query with line feeds for readability (cannot be copy-pasted)
```
search is(instance) |
aggregate
/ancestors.cloud.reported.name as cloud,
/ancestors.account.reported.name as account,
/ancestors.region.reported.name as region,
instance_type as type :
sum(1) as instances_total,
sum(instance_cores) as cores_total,
sum(instance_memory*1024*1024*1024) as memory_bytes
```
If your graph contains any compute instances the resulting output will look something like this
```
---
group:
cloud: aws
account: someengineering-platform
region: us-west-2
type: m5.2xlarge
instances_total: 6
cores_total: 24
memory_bytes: 96636764160
---
group:
cloud: aws
account: someengineering-platform
region: us-west-2
type: m5.xlarge
instances_total: 8
cores_total: 64
memory_bytes: 257698037760
---
group:
cloud: gcp
account: someengineering-dev
region: us-west1
type: n1-standard-4
instances_total: 12
cores_total: 48
memory_bytes: 193273528320
```
Let us dissect what we've written here:
- `search is(instance)` fetch all the resources that inherit from base kind `instance`. This would be compute instances like `aws_ec2_instance` or `gcp_instance`.
- `aggregate /ancestors.cloud.reported.name as cloud, /ancestors.account.reported.name as account, /ancestors.region.reported.name as region, instance_type as type` aggregate the instance metrics by `cloud`, `account`, and `region` name as well as `instance_type` (think `GROUP_BY` in SQL).
- `sum(1) as instances_total, sum(instance_cores) as cores_total, sum(instance_memory*1024*1024*1024) as memory_bytes` sum up the total number of instances, number of instance cores and memory. The latter is stored in GB, and here we convert it to bytes, as is customary in Prometheus exporters.
### Taking it one step further
```
search is(instance) and instance_status = running | aggregate /ancestors.cloud.reported.name as cloud, /ancestors.account.reported.name as account, /ancestors.region.reported.name as region, instance_type as type : sum(/ancestors.instance_type.reported.ondemand_cost) as instances_hourly_cost_estimate
```
Again the same query with line feeds for readability (cannot be copy-pasted)
```
search is(instance) and instance_status = running |
aggregate
/ancestors.cloud.reported.name as cloud,
/ancestors.account.reported.name as account,
/ancestors.region.reported.name as region,
instance_type as type :
sum(/ancestors.instance_type.reported.ondemand_cost) as instances_hourly_cost_estimate
```
Outputs something like
```
---
group:
cloud: gcp
account: maestro-229419
region: us-central1
type: n1-standard-4
instances_hourly_cost_estimate: 0.949995
```
What did we do here? We told Resoto to find all resource of type compute instance (`search is(instance)`) with a status of `running` and then merge the result with ancestors (parents and parent parents) of type `cloud`, `account`, `region` and now also `instance_type`.
Let us look at two things here. First, in the previous example we already aggregated by `instance_type`. However this was the string attribute called `instance_type` that is part of every instance resource and contains strings like `m5.xlarge` (AWS) or `n1-standard-4` (GCP).
Example
```
> search is(instance) | tail -1 | format {kind} {name} {instance_type}
aws_ec2_instance i-039e06bb2539e5484 t2.micro
```
What we did now was ask Resoto to go up the graph and find the directly connected resource of kind `instance_type`.
An `instance_type` resource looks something like this
```
> search is(instance_type) | tail -1 | dump
reported:
kind: aws_ec2_instance_type
id: t2.micro
tags: {}
name: t2.micro
instance_type: t2.micro
instance_cores: 1
instance_memory: 1
ondemand_cost: 0.0116
ctime: '2021-09-28T13:10:08Z'
```
As you can see, the instance type resource has a float attribute called `ondemand_cost` which is the hourly cost a cloud provider charges for this particular type of compute instance. In our aggregation query we now sum up the hourly cost of all currently running compute instances and export them as a metric named `instances_hourly_cost_estimate`. If we now export this metric into a timeseries DB like Prometheus we are able to plot our instance cost over time.
This is the core functionality `resotometrics` provides.
## Contact
If you have any questions feel free to [join our Discord](https://discord.gg/someengineering) or [open a GitHub issue](https://github.com/someengineering/resoto/issues/new).
## License
```
Copyright 2023 Some Engineering Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
| /resotometrics-3.6.5.tar.gz/resotometrics-3.6.5/README.md | 0.629091 | 0.936692 | README.md | pypi |
from .errors import DoesNotExist, ValidationError, DataConflictError, AuthorizationError
from .link import LinkHolder
class ResourceContainer(object):
    """Base for collection/instance wrappers: holds the entry point (carrying
    the acting user) and the resource interface they operate on."""

    def __init__(self, entry_point, resource_interface):
        self._entry_point = entry_point
        self._res = resource_interface
class ResourceCollection(ResourceContainer):
    """
    The entity that represents a pile of resources.
    >>> student_collection = entry_point.get_resource(Student)
    The collection is iterable:
    >>> for student in student_collection:
    >>> ...
    If :meth:`Resource.get_uris <resource_api.interfaces.Resource.get_uris>` is implemented to return an
    indexable entity the collection elements can be accessed by index as well:
    >>> student = student_collection[15]
    """

    def __init__(self, entry_point, resource_interface, params=None):
        super(ResourceCollection, self).__init__(entry_point, resource_interface)
        self._params = params or {}
        # _items caches the fetched URI list; _iter_items is the live iterator.
        self._items = self._iter_items = None

    def _get(self, pk):
        return ResourceInstance(self._entry_point, self._res, pk)

    def __iter__(self):
        # Fetch the URI list lazily on first iteration and cache it; every
        # new iteration restarts from the beginning of the cached list.
        if self._items is None:
            if not self._res.can_get_uris(self._entry_point.user):
                # Fixed typo in message: "retrivial" -> "retrieval".
                raise AuthorizationError("Resource collection retrieval is not allowed")
            params = self._res.query_schema.deserialize(self._params, validate_required_constraint=False,
                                                        with_errors=False)
            self._items = self._res.get_uris(self._entry_point.user, params)
        self._iter_items = iter(self._items)
        return self

    def __getitem__(self, key):
        self.__iter__()  # ensure _items is populated
        target_pk = self._items[key]
        return self._get(target_pk)

    def __len__(self):
        self.__iter__()
        return len(self._items)

    def next(self):
        # Use the next() builtin (Python 2.6+) instead of the Python-2-only
        # .next() method so the class is forward compatible.
        return self._get(next(self._iter_items))

    # Python 3 iterator protocol name for the Python 2 style next() above.
    __next__ = next

    def filter(self, params=None):
        """
        Filtering options can be applied to collections to return new collections that contain a subset of original
        items:
        *NOTE*: filtering operations applied to root collections return normal collections
        >>> student_collection = entry_point.get_resource(Student)
        >>> new_collection = student_collection.filter(params={"name__startswith": "Abr"})
        """
        new_params = {}
        new_params.update(self._params)
        if params:
            new_params.update(params)
        return ResourceCollection(self._entry_point, self._res, new_params)

    def count(self):
        """ Returns count of all items within the system that satisfy filtering criterias.
        NOTE: :code:`len(collection)` is supposed to return the same result as :code:`collection.count()`. The key
        difference between them is that :code:`len` needs to fetch all items in the collection meanwhile
        :code:`collection.count()` relies on
        :meth:`Resource.get_count <resource_api.interfaces.Resource.get_count>`
        >>> len(student_collection)
        4569
        >>> student_collection.count()
        4569
        """
        if not self._res.can_get_uris(self._entry_point.user):
            # Fixed typo in message: "retrivial" -> "retrieval".
            raise AuthorizationError("Resource collection count retrieval is not allowed")
        params = self._res.query_schema.deserialize(self._params, validate_required_constraint=False,
                                                    with_errors=False)
        return self._res.get_count(self._entry_point.user, params)

    def serialize(self):
        """Return the primary keys of all items as a plain list."""
        return [item.serialize_pk() for item in self]
class RootResourceCollection(ResourceCollection):
    """
    Root resource collection is actually a normal resource collection with two extra methods: *create* and *get*.
    """

    def get(self, pk):
        """
        >>> student_collection = entry_point.get_resource(Student)
        >>> existing_student = student_collection.get("john@example.com")
        """
        def _err():
            raise DoesNotExist("Resource with pk %r does not exist." % pk)
        try:
            pk = self._res.UriPolicy.deserialize(pk)
            if not self._res.exists(self._entry_point.user, pk):
                _err()
        # Fixed: Python-2-only "except ValidationError, msg" syntax replaced
        # with the "as" form, which works on Python 2.6+ and Python 3.
        except ValidationError as msg:
            raise DoesNotExist("PK validation failed: %s" % msg)
        # A resource the user may not discover is reported as nonexistent.
        if not self._res.can_discover(self._entry_point.user, pk):
            _err()
        return ResourceInstance(self._entry_point, self._res, pk)

    def create(self, data, link_data=None):
        """
        >>> student_collection = entry_point.get_resource(Student)
        >>> new_student = student_collection.create({"first_name": "John",
        >>>                                          "last_name": "Smith",
        >>>                                          "email": "foo@bar.com",
        >>>                                          "birthday": "1987-02-21T22:22:22"},
        >>>                                         {"courses": [{"@target": "Maths", "grade": 4},
        >>>                                                      {"@target": "Sports"}]})
        """
        data = self._res.schema.deserialize(data)
        if not self._res.can_create(self._entry_point.user, data):
            raise AuthorizationError("Resource creation is not allowed")
        rval = ResourceInstance(self._entry_point, self._res, None)
        readonly = self._res.schema.find_fields(readonly=True)
        intersection = readonly.intersection(set(data.keys()))
        if intersection:
            raise ValidationError("Readonly fields can not be set: %s" % ", ".join(intersection))
        # Validate links before the resource exists so nothing is persisted
        # when the link payload is invalid.
        valid_link_data = rval.links._validate(link_data)
        pk = self._res.UriPolicy.generate_pk(data, link_data)
        if pk is None:  # DAL has to generate PK if the UriPolicy instance didn't
            pk = self._res.create(self._entry_point.user, pk, data)
        else:
            if self._res.exists(self._entry_point.user, pk):
                raise DataConflictError("Resource with PK %r already exists" % pk)
            self._res.create(self._entry_point.user, pk, data)
        rval._set_pk(pk)
        rval.links._set(valid_link_data)
        return rval
class ResourceInstance(ResourceContainer):
    """
    Whenever :class:`creating new or fetching existing <resource_api.resource.RootResourceCollection>` resources
    resource instances are returned. Resource instances are also returned whenever iterating over
    :class:`resource collections <resource_api.resource.ResourceCollection>`.
    """
    def __init__(self, entry_point, resource_interface, pk):
        super(ResourceInstance, self).__init__(entry_point, resource_interface)
        self._pk = pk
        self._links = LinkHolder(entry_point, resource_interface, pk)
    def _set_pk(self, pk):
        """ NOTE: is used only AFTER resource creation """
        self._pk = pk
        # Keep the link holder in sync with the freshly generated PK.
        self._links._set_pk(pk)
    @property
    def links(self):
        """ Returns a :class:`link holder <resource_api.link.LinkHolder>` """
        return self._links
    @property
    def data(self):
        """ Returns data associated with the resource
        >>> student.data
        {"first_name": "John", "last_name": "Smith", "email": "foo@bar.com", "birthday": "1987-02-21T22:22:22"}
        """
        saved_data = self._res.get_data(self._entry_point.user, self._pk)
        # Authorization check receives the fetched data so it can make
        # data-dependent decisions.
        if not self._res.can_get_data(self._entry_point.user, self._pk, saved_data):
            raise AuthorizationError("Resource fetching is not allowed")
        return saved_data
    @property
    def pk(self):
        """ Returns PK of the resource
        >>> student.pk
        "foo@bar.com"
        """
        return self._pk
    def update(self, data):
        """ Changes specified fields of the resource
        >>> student.update({"first_name": "Looper"})
        >>> student.data
        {"first_name": "Looper", "last_name": "Smith", "email": "foo@bar.com", "birthday": "1987-02-21T22:22:22"}
        """
        # Partial update: required-field constraint is intentionally skipped.
        data = self._res.schema.deserialize(data, validate_required_constraint=False)
        if not self._res.can_update(self._entry_point.user, self._pk, data):
            raise AuthorizationError("Resource updating is not allowed")
        unchangeable = self._res.schema.find_fields(readonly=True, changeable=False)
        intersection = unchangeable.intersection(set(data.keys()))
        if intersection:
            raise ValidationError("Unchangeable fields: %s" % ", ".join(intersection))
        self._res.update(self._entry_point.user, self._pk, data)
    def delete(self):
        """ Removes the resource
        >>> student.delete()
        >>> student.data
        ...
        DoesNotExist: ...
        """
        if not self._res.can_delete(self._entry_point.user, self._pk):
            raise AuthorizationError("Resource deletion is not allowed")
        # Remove all links first so no dangling relationships remain.
        self.links._clear()
        self._res.delete(self._entry_point.user, self._pk)
    def serialize(self):
        """ Returns the resource's data as a JSONizable dict. """
        return self._res.schema.serialize(self.data)
def serialize_pk(self):
return self._res.UriPolicy.serialize(self.pk)
import inspect
from abc import ABCMeta, abstractmethod, abstractproperty
from .schema import Schema
from .errors import ResourceDeclarationError
class BaseMetaClass(ABCMeta):
    # Metaclass that, at class-creation time, mixes the declarative inner
    # ``Schema``/``QuerySchema`` containers of the interface class with the
    # real ``Schema`` base so they gain validation/serialization behaviour.
    def __init__(cls, name, bases, dct):
        super(BaseMetaClass, cls).__init__(name, bases, dct)
        class ResourceSchema(cls.Schema, Schema):
            pass
        cls.Schema = ResourceSchema
        class QuerySchema(cls.QuerySchema, Schema):
            pass
        cls.QuerySchema = QuerySchema
class BaseInterface(object):
    # Common base for Resource and Link: instantiates the declarative
    # Schema/QuerySchema/Meta containers and exposes them via get_schema().
    __metaclass__ = BaseMetaClass
    @classmethod
    def get_name(cls):
        # Fully qualified name; used as a registry key and in schema output.
        return cls.__module__ + "." + cls.__name__
    class Schema:
        # Declarative container for the interface's data fields.
        pass
    class QuerySchema:
        # Declarative container for collection-filtering parameters.
        pass
    class Meta:
        # Arbitrary extra metadata exposed through get_schema().
        pass
    def __str__(self):
        return self.get_name()
    def __init__(self, context):
        """
        context (object)
            entity that is supposed to hold DAL (data access layer) related functionality like database connections,
            network sockets, etc.
        """
        self.schema = self.Schema()
        self.query_schema = self.QuerySchema()
        self.meta = self.Meta()
        self.context = context
        # Readonly fields cannot be required and have default values
        for field_name in self.schema.find_fields(readonly=True):
            self.schema._required_fields.discard(field_name)
            self.schema._defaults.pop(field_name, None)
            self.schema.fields[field_name].default = None
            self.schema.fields[field_name].required = False
    def get_schema(self):
        """ Returns a dict with public Meta attributes under "meta", or {} if none. """
        meta = {}
        for key in dir(self.Meta):
            if not key.startswith("_"):
                meta[key] = getattr(self.Meta, key)
        if meta:
            return {"meta": meta}
        else:
            return {}
class AbstractUriPolicy(object):
    """
    Defines a way to generate `URI <http://en.wikipedia.org/wiki/Uniform_resource_identifier>`_ based on data that was
    passed when creating the resource.
    """
    __metaclass__ = ABCMeta
    def __init__(self, resource_instance):
        """
        resource_instance (Resource instance)
            entity that can be used to access previously created items
        """
        self._resource_instance = resource_instance
    @abstractproperty
    def type(self):
        """ A string that would give a hint to the client which PK policy is in use """
    @abstractmethod
    def generate_pk(self, data, link_data=None):
        """ Generates a PK based on input data
        data (dict):
            the same data that is passed to Resource's *create* method
        link_data (dict):
            the same link_data that is passed to Resource's *create* method
        @return
            generated PK, or None if the DAL is expected to generate it
        """
    @abstractmethod
    def deserialize(self, pk):
        """ Transforms data sent over the wire into sth. usable inside DAL
        pk
            PK value as it comes over the wire - e.g. string in case of HTTP
        @return
            PK transformed to the data type expected by DAL in order to fetch data
        """
    @abstractmethod
    def serialize(self, pk):
        """ Transforms value into sth. ready to transfer over the wire
        pk
            PK value used within DAL to identify stored entries
        @return
            PK transformed into something that can be sent over the wire - e.g. string in case of HTTP
        """
    def get_schema(self):
        """ Returns meta information (dict) to be included into resource's schema """
        return {
            "description": self.__doc__,
            "type": self.type
        }
class PkUriPolicy(AbstractUriPolicy):
    """ Uses value of a field marked as "pk=True" as resource's URI """
    def __init__(self, resource_instance):
        super(PkUriPolicy, self).__init__(resource_instance)
        def _err(msg):
            raise ResourceDeclarationError(resource_instance.__class__, msg)
        # Exactly one field must be declared with pk=True; anything else is
        # a declaration error.
        found = resource_instance.schema.find_fields(pk=True)
        if len(found) == 0:
            _err("PK field is not defined")
        elif len(found) == 1:
            self._pk_name = list(found)[0]
            self._pk_field = resource_instance.schema.fields[self._pk_name]
        else:
            _err("Multiple PKs found: %s" % ", ".join(found))
    @property
    def type(self):
        return "pk_policy"
    def generate_pk(self, data, link_data=None):
        # May return None when the PK field is absent - the DAL then
        # generates the PK itself (see RootResourceCollection.create).
        return data.get(self._pk_name)
    def deserialize(self, pk):
        # Delegate to the PK field so type conversion/validation is applied.
        return self._pk_field.deserialize(pk)
    def serialize(self, pk):
        return self._pk_field.serialize(pk)
class ResourceMetaClass(BaseMetaClass):
    # Extends BaseMetaClass by stamping each declared Link class with its
    # owning resource's qualified name (``source``) and the attribute name
    # it was declared under (``name``).
    def __init__(cls, name, bases, dct):
        super(ResourceMetaClass, cls).__init__(name, bases, dct)
        for field_name, field in cls.iter_links():
            field.source = cls.get_name()
            field.name = field_name
class Resource(BaseInterface):
    """ Represents entity that is supposed to be exposed via public interface
    Methods have the following arguments:
    pk
        PK of existing resource
    data (dict)
        information to be stored within the resource
    params (dict)
        extra parameters to be used for collection filtering
    user (object)
        entity that corresponds to the user that performs certain operation on the resource
    """
    __metaclass__ = ResourceMetaClass
    # Default URI policy; subclasses may override with another
    # AbstractUriPolicy subclass. Replaced by an instance in __init__.
    UriPolicy = PkUriPolicy
    def __init__(self, context):
        super(Resource, self).__init__(context)
        self.UriPolicy = self.UriPolicy(self)
        self.links = self.Links()
    __init__.__doc__ = BaseInterface.__init__.__doc__
    @classmethod
    def iter_links(cls):
        # Yields (attribute_name, Link subclass) pairs declared in Links.
        for field_name in dir(cls.Links):
            if field_name.startswith("_"):
                continue
            link_class = getattr(cls.Links, field_name)
            if not inspect.isclass(link_class) or not issubclass(link_class, Link):
                continue
            yield field_name, link_class
    class Links:
        # Declarative container for Link subclasses of this resource.
        pass
    def get_schema(self):
        """ Returns a JSONizable description of the resource: docs, field
        schema, URI policy, optional query schema, links and Meta info. """
        rval = {
            "description": self.__doc__,
            "schema": self.schema.get_schema(),
            "uri_policy": self.UriPolicy.get_schema()
        }
        rval.update(super(Resource, self).get_schema())
        query_schema = self.query_schema.get_schema()
        if query_schema:
            rval["query_schema"] = query_schema
        links = dict([(link_name, getattr(self.links, link_name).get_schema()) for link_name, _ in self.iter_links()])
        if links:
            rval["links"] = links
        return rval
    # Accessors
    @abstractmethod
    def exists(self, user, pk):
        """ Returns True if the resource exists """
    @abstractmethod
    def create(self, user, pk, data):
        """ Creates a new instance"""
    @abstractmethod
    def update(self, user, pk, data):
        """ Updates specified fields of a given instance """
    @abstractmethod
    def get_data(self, user, pk):
        """ Returns fields of the resource """
    @abstractmethod
    def delete(self, user, pk):
        """ Removes the resource """
    @abstractmethod
    def get_uris(self, user, params=None):
        """ Returns an iterable over primary keys """
    @abstractmethod
    def get_count(self, user, params=None):
        """ Returns total amount of items that fit filtering criteria """
    # AUTH methods - default implementations allow everything; override to
    # restrict access.
    def can_get_data(self, user, pk, data):
        """ Returns only the fields that user is allowed to fetch """
        return True
    def can_discover(self, user, pk):
        """ Returns False if user is not allowed to know about resource's existence """
        return True
    def can_get_uris(self, user):
        """ Returns True if user is allowed to list the items in the collection or get their count """
        return True
    def can_update(self, user, pk, data):
        """ Returns True if user is allowed to update the resource """
        return True
    def can_create(self, user, data):
        """ Returns True if user is allowed to create resource with certain data """
        return True
    def can_delete(self, user, pk):
        """ Returns True if user is allowed to delete the resource """
        return True
class Link(BaseInterface):
    """ Represents a relationship between two resources that needs to be exposed via public interface
    Methods have the following arguments:
    pk
        PK of existing source resource (the one that defines link field)
    data (dict)
        extra information to be stored for this relationship
    rel_pk (digit|string)
        PK of existing target resource (the one to which we are linking to)
    params (dict)
        extra parameters to be used for collection filtering
    user (object)
        entity that corresponds to the user that performs certain operation on the link
    """
    __metaclass__ = BaseMetaClass
    class cardinalities:
        # Allowed values for the ``cardinality`` declaration attribute.
        ONE = "ONE"
        MANY = "MANY"
    # Declarative attributes; validated/normalized in __init__.
    related_link = None
    cardinality = cardinalities.MANY
    master = False
    required = False
    one_way = False
    changeable = True
    readonly = False
    related_name = target = None
    def __init__(self, context):
        super(Link, self).__init__(context)
        cls = self.__class__
        # Boolean declaration attributes must actually be booleans.
        for name in ["master", "required", "one_way", "changeable"]:
            if not isinstance(getattr(cls, name), bool):
                raise ResourceDeclarationError(cls, "%s must be boolean" % name)
        if self.one_way:
            # A one-way link has no reverse side, so it is implicitly master.
            self.master = True
        elif self.related_name is None:
            raise ResourceDeclarationError(cls, "related_name is not defined")
        if self.target is None:
            raise ResourceDeclarationError(cls, "target is not defined")
        if self.required and self.cardinality == cls.cardinalities.MANY:
            raise ResourceDeclarationError(cls, "Link to many can't be required")
        # Qualify bare class names with the declaring module.
        if "." not in cls.source:
            cls.source = cls.__module__ + "." + cls.source
        if "." not in cls.target:
            cls.target = cls.__module__ + "." + cls.target
        card = cls.cardinality
        if card not in [Link.cardinalities.ONE, Link.cardinalities.MANY]:
            raise ResourceDeclarationError(cls, "cardinality must be ONE or MANY is %r" % card)
    __init__.__doc__ = BaseInterface.__init__.__doc__
    def get_schema(self):
        """ Returns a JSONizable description of the link. """
        # The non-master side reuses the master (related) link's schemas.
        if self.master:
            schema, query_schema = self.schema, self.query_schema
        else:
            schema, query_schema = self.related_link.schema, self.related_link.query_schema
        rval = {
            "target": self.target,
            "description": self.__doc__,
            "schema": schema.get_schema(),
            "required": self.required,
            "cardinality": self.cardinality,
            "changeable": self.changeable,
            "readonly": self.readonly
        }
        rval.update(super(Link, self).get_schema())
        if self.cardinality == self.cardinalities.MANY:
            query_schema = query_schema.get_schema()
            if query_schema:
                rval["query_schema"] = query_schema
        if self.one_way:
            rval["one_way"] = True
        else:
            rval["related_name"] = self.related_name
        return rval
    @classmethod
    def get_name(cls):
        # Links are namespaced by the source resource name.
        return cls.source + ":" + cls.name
    @abstractmethod
    def exists(self, user, pk, rel_pk):
        """ Returns True if the link exists (is not nullable) """
    @abstractmethod
    def get_data(self, user, pk, rel_pk):
        """ Returns link data """
    @abstractmethod
    def create(self, user, pk, rel_pk, data=None):
        """ Creates a new link with optional extra data """
    @abstractmethod
    def update(self, user, pk, rel_pk, data):
        """ Updates existing link with specified data """
    @abstractmethod
    def delete(self, user, pk, rel_pk):
        """ Removes the link. If rel_pk is None - removes all links """
    @abstractmethod
    def get_uris(self, user, pk, params=None):
        """ Returns an iterable over target primary keys """
    @abstractmethod
    def get_count(self, user, pk, params=None):
        """ Returns total amount of items that fit filtering criteria """
    # AUTH methods - default implementations allow everything; override to
    # restrict access.
    def can_get_data(self, user, pk, rel_pk, data):
        """ Returns only the fields that user is allowed to fetch """
        return True
    def can_discover(self, user, pk, rel_pk):
        """ Returns False if user is not allowed to know about resource's existence """
        return True
    def can_get_uris(self, user, pk):
        """ Returns True if user is allowed to list the items in the collection or get their count """
        return True
    def can_update(self, user, pk, rel_pk, data):
        """ Returns True if user is allowed to update the resource """
        return True
    def can_create(self, user, pk, rel_pk, data):
        """ Returns True if user is allowed to create resource with certain data """
        return True
def can_delete(self, user, pk, rel_pk):
""" Returns True if user is allowed to delete the resource """
return True
import re
import inspect
import datetime
from copy import copy
from collections import defaultdict
import isodate
import pytz
from .errors import ValidationError, DeclarationError
class BaseField(object):
    """
    Superclass for all fields
    description (None|string = None)
        help text to be shown in schema. This should include the reasons why this field actually needs to exist.
    required (bool = True)
        flag that specifies if the field has to be present
    \*\*kwargs
        extra parameters that are not programmatically supported
    """
    verbose_name = "unknown_type"
    def __init__(self, description=None, required=True, **kwargs):
        self.description = description
        # kwargs is kept verbatim; find_fields() searches it and get_schema()
        # exposes it to clients.
        self.kwargs = kwargs
        self.required = required
    def _to_python(self, val):
        """ Transforms primitive data (e.g. dict, list, str, int, bool, float) to a python object """
        return val
    def _validate(self, val):
        """ Validates incoming data against constraints defined via field declaration """
        if self.required and val is None:
            raise ValidationError("Value is required and thus cannot be None")
    def deserialize(self, val):
        """ Converts data passed over the wire or from the script into sth. to be used in python scripts """
        # Convert first, then validate the converted value.
        rval = self._to_python(val)
        self._validate(rval)
        return rval
    def serialize(self, val):
        """ Converts python object into sth. that can be sent over the wire """
        return val
    def get_schema(self):
        """ Returns a JSONizable description of the field. """
        rval = {
            "description": self.description,
            "type": self.verbose_name,
            "required": self.required
        }
        # Unrecognized declaration kwargs are passed through to the schema.
        rval.update(self.kwargs)
        return rval
class BaseIsoField(BaseField):
    """ Represents time entity that can be either a native object or ISO 8601 datetime string.
    The item is
    `serialized <https://docs.python.org/2/library/datetime.html#datetime.datetime.isoformat>`_ into ISO 8601 string.
    """
    def _parse(self, val):
        """ Supposed to transform the value into a valid Python type using a respective isodate function """
        raise NotImplementedError
    def _to_python(self, val):
        val = super(BaseIsoField, self)._to_python(val)
        if val is None:
            return None
        # Native datetime/date/time objects are passed through untouched;
        # only strings are parsed.
        if isinstance(val, basestring):
            try:
                # Parse datetime
                val = self._parse(val)
            except ValueError:
                raise ValidationError("Datetime timestamp has to be a string in ISO 8601 format")
        return val
    def serialize(self, val):
        if val is None:
            return None
        return val.isoformat()
class DateTimeField(BaseIsoField):
    """ datetime object serialized into YYYY-MM-DDThh:mm:ss.sTZD.
    E.g.: 2013-09-30T11:32:39.984847 """
    verbose_name = "datetime"
    def _parse(self, val):
        return isodate.parse_datetime(val)
    def _to_python(self, val):
        val = super(DateTimeField, self)._to_python(val)
        if val is None:
            return None
        # Convert to naive UTC
        if hasattr(val, "tzinfo") and val.tzinfo:
            val = val.astimezone(pytz.utc)
            val = val.replace(tzinfo=None)
        return val
class DateField(BaseIsoField):
    """ date object serialized into YYYY-MM-DD.
    E.g.: 2013-09-30 """
    verbose_name = "date"
    def _parse(self, val):
        return isodate.parse_date(val)
class TimeField(BaseIsoField):
    """ time object serialized into hh:mm:ssTZD.
    E.g.: 11:32:39.984847 """
    verbose_name = "time"
    def _parse(self, val):
        return isodate.parse_time(val)
    def _to_python(self, val):
        val = super(TimeField, self)._to_python(val)
        if val is None:
            return None
        # Convert to naive UTC
        if hasattr(val, "tzinfo") and val.tzinfo:
            # time objects cannot be shifted directly, so combine with
            # today's date, convert the resulting datetime, and extract the
            # time part again.
            dt = datetime.datetime.combine(datetime.date.today(), val)
            dt = dt.astimezone(pytz.utc)
            dt = dt.replace(tzinfo=None)
            val = dt.time()
        return val
class DurationField(BaseIsoField):
    """ timedelta object serialized into PnYnMnDTnHnMnS.
    E.g.: P105DT9H52M49.448422S"""
    verbose_name = "duration"
    def _parse(self, val):
        return isodate.parse_duration(val)
    def serialize(self, val):
        if val is None:
            return None
        # timedelta has no isoformat(), so use isodate's formatter instead
        # of the BaseIsoField implementation.
        return isodate.duration_isoformat(val)
class BaseSimpleField(BaseField):
    """ Base class for fields holding a single primitive value.
    default
        value used when the field is absent from input data; it is converted
        via _to_python at declaration time, so an invalid default raises
        DeclarationError immediately.
    """
    # Concrete subclasses set this to the Python type used for conversion.
    python_type = None
    def __init__(self, default=None, **kwargs):
        super(BaseSimpleField, self).__init__(**kwargs)
        try:
            self.default = self._to_python(default)
        except ValidationError as e:
            # Surface bad defaults as declaration-time errors.
            raise DeclarationError("default: %s" % str(e))
    def _to_python(self, val):
        if val is None:
            return None
        try:
            return self.python_type(val)
        except ValueError:
            # Fixed typo in the user-facing message ("Converion").
            raise ValidationError("Conversion of value %r failed" % val)
    def get_schema(self):
        rval = super(BaseSimpleField, self).get_schema()
        rval["default"] = self.default
        return rval
class IndexableField(BaseSimpleField):
    """ Base class for fields whose values can be restricted to (or excluded
    from) a fixed set of choices.
    choices (list|tuple = None)
        whitelist of allowed values
    invalid_choices (list|tuple = None)
        blacklist of forbidden values
    """
    def __init__(self, choices=None, invalid_choices=None, **kwargs):
        super(IndexableField, self).__init__(**kwargs)
        if choices is not None:
            if not isinstance(choices, (list, tuple)):
                raise DeclarationError("choices has to be a list or tuple")
            # Convert every choice through _to_python so comparisons in
            # _validate happen on normalized values.
            tempo = []
            for i in xrange(len(choices)):
                try:
                    tempo.append(self._to_python(choices[i]))
                except Exception, e:
                    raise DeclarationError("[%d]: %s" % (i, str(e)))
            choices = tempo
        if invalid_choices is not None:
            if not isinstance(invalid_choices, (list, tuple)):
                raise DeclarationError("invalid_choices has to be a list or tuple")
            tempo = []
            for i in xrange(len(invalid_choices)):
                try:
                    tempo.append(self._to_python(invalid_choices[i]))
                except Exception, e:
                    raise DeclarationError("[%d]: %s" % (i, str(e)))
            invalid_choices = tempo
        # Declaration-time sanity checks between default and choice lists.
        if self.default is not None:
            if invalid_choices and self.default in invalid_choices:
                raise DeclarationError("default value is in invalid_choices")
            if choices and self.default not in choices:
                raise DeclarationError("default value is not in choices")
        if invalid_choices and choices:
            inter = set(choices).intersection(set(invalid_choices))
            if inter:
                raise DeclarationError("these choices are stated as both valid and invalid: %r" % inter)
        self.choices, self.invalid_choices = choices, invalid_choices
    def _validate(self, val):
        super(IndexableField, self)._validate(val)
        if val is None:
            return
        if self.choices and val not in self.choices:
            raise ValidationError("Val %r must be one of %r" % (val, self.choices))
        if self.invalid_choices and val in self.invalid_choices:
            raise ValidationError("Val %r must NOT be one of %r" % (val, self.invalid_choices))
    def get_schema(self):
        rval = super(IndexableField, self).get_schema()
        rval["choices"] = self.choices
        rval["invalid_choices"] = self.invalid_choices
        return rval
class DigitField(IndexableField):
    """ Base class for fields that represent numbers
    min_val (int|long|float = None)
        Minumum threshold for incoming value
    max_val (int|long|float = None)
        Maximum threshold for imcoming value
    """
    def __init__(self, min_val=None, max_val=None, **kwargs):
        super(DigitField, self).__init__(**kwargs)
        min_val = self._to_python(min_val)
        max_val = self._to_python(max_val)
        # FIX: use explicit None checks - the old ``min_val or max_val``
        # treated a legitimate bound of 0 as "not set", so declaring both
        # choices and min_val=0 silently escaped this conflict check.
        has_value_limits = min_val is not None or max_val is not None
        if self.choices is not None and has_value_limits:
            raise DeclarationError("choices and min or max value limits do not make sense together")
        if min_val is not None and max_val is not None:
            if max_val < min_val:
                raise DeclarationError("max val is less than min_val")
        # Declaration-time sanity checks for the default value.
        if self.default is not None:
            if min_val is not None and self.default < min_val:
                raise DeclarationError("default value is too small")
            if max_val is not None and self.default > max_val:
                raise DeclarationError("default value is too big")
        self.min_val, self.max_val = min_val, max_val
    def _to_python(self, val):
        if not isinstance(val, (basestring, int, long, float, type(None))):
            raise ValidationError("Has to be a digit or a string convertable to digit")
        return super(DigitField, self)._to_python(val)
    def _validate(self, val):
        super(DigitField, self)._validate(val)
        if val is None:
            return
        if self.min_val is not None and val < self.min_val:
            raise ValidationError("Digit %r is too small. Has to be at least %r." % (val, self.min_val))
        if self.max_val is not None and val > self.max_val:
            raise ValidationError("Digit %r is too big. Has to be at max %r." % (val, self.max_val))
    def get_schema(self):
        rval = super(DigitField, self).get_schema()
        rval.update({
            "min_val": self.min_val,
            "max_val": self.max_val
        })
        return rval
class IntegerField(DigitField):
    """ Transforms input data that could be any number or a string value with that number into *long* """
    # Python 2 ``long`` is used so arbitrarily large integers round-trip.
    python_type = long
    verbose_name = "int"
class FloatField(DigitField):
    """ Transforms input data that could be any number or a string value with that number into *float* """
    python_type = float
    verbose_name = "float"
class StringField(IndexableField):
    """ Represents any arbitrary text
    regex (string = None)
        `Python regular expression <https://docs.python.org/2/library/re.html#regular-expression-syntax>`_
        used to validate the string.
    min_length (int = None)
        Minimum size of string value
    max_length (int = None)
        Maximum size of string value
    """
    python_type = unicode
    verbose_name = "string"
    def __init__(self, regex=None, min_length=None, max_length=None, **kwargs):
        super(StringField, self).__init__(**kwargs)
        def _set(name, transform_f, val):
            # Normalize a declaration attribute, converting conversion
            # failures into declaration-time errors.
            if val is not None:
                try:
                    val = transform_f(val)
                except Exception as e:
                    raise DeclarationError("%s: %s" % (name, str(e)))
            setattr(self, name, val)
        # FIX: explicit None checks - the old ``min_length or max_length or
        # regex`` treated min_length=0 as "not set" and skipped this
        # conflict check.
        has_value_checks = (min_length is not None or max_length is not None
                            or regex is not None)
        if self.choices and has_value_checks:
            raise DeclarationError("choices and value checkers do not make sense together")
        _set("regex", re.compile, regex)
        _set("min_length", int, min_length)
        _set("max_length", int, max_length)
    def _to_python(self, val):
        if not isinstance(val, (basestring, type(None))):
            raise ValidationError("Has to be string")
        return super(StringField, self)._to_python(val)
    def _validate(self, val):
        super(StringField, self)._validate(val)
        if val is None:
            return
        if self.min_length is not None:
            if len(val) < self.min_length:
                raise ValidationError("Length is too small. Is %r has to be at least %r." % (len(val),
                                                                                            self.min_length))
        if self.max_length is not None:
            if len(val) > self.max_length:
                # FIX: the message was copy-pasted from the min_length branch
                # and wrongly reported "too small ... at least".
                raise ValidationError("Length is too big. Is %r has to be at max %r." % (len(val),
                                                                                        self.max_length))
        reg = self.regex
        if reg is not None:
            if not reg.match(val):
                raise ValidationError("%r did not match regexp %r" % (val, reg.pattern))
    def get_schema(self):
        rval = super(StringField, self).get_schema()
        rval.update({
            "regex": getattr(self.regex, "pattern", None),
            "min_length": self.min_length,
            "max_length": self.max_length})
        return rval
class BooleanField(BaseSimpleField):
    """ Expects only a boolean value as incoming data """
    verbose_name = "boolean"
    python_type = bool
    def _to_python(self, val):
        if not isinstance(val, (bool, type(None))):
            # FIX: the message was copy-pasted from DigitField and wrongly
            # complained about digits for a boolean field.
            raise ValidationError("Has to be a boolean")
        return super(BooleanField, self)._to_python(val)
# Mapping from Python primitive types to the Field classes used to validate
# them when a bare type (instead of a Field instance) is supplied in a field
# declaration - see wrap_into_field().
PRIMITIVE_TYPES_MAP = {
    int: IntegerField,
    float: FloatField,
    str: StringField,
    unicode: StringField,
    basestring: StringField,
    bool: BooleanField
}
def wrap_into_field(simple_type):
    """ Normalizes a field declaration into a Field instance.
    A ready-made BaseField instance is returned untouched; a primitive type
    is replaced with its mapped Field class instance; anything else is
    treated as a nested schema and wrapped into an ObjectField.
    """
    if isinstance(simple_type, BaseField):
        return simple_type
    field_class = PRIMITIVE_TYPES_MAP.get(simple_type, None)
    if field_class:
        return field_class()
    return ObjectField(simple_type)
class ListField(BaseField):
    """ Represents a collection of primitives. Serialized into a list.
    item_type (python primitve|Field instance)
        value is used by list field to validate individual items
        python primitive are internally mapped to Field instances according to
        :data:`PRIMITIVE_TYPES_MAP <resource_api.interfaces.PRIMITIVE_TYPES_MAP>`
    """
    verbose_name = "list"
    def __init__(self, item_type, **kwargs):
        super(ListField, self).__init__(**kwargs)
        self.item_type = wrap_into_field(item_type)
    def deserialize(self, val):
        """ Validates each item with item_type; raises ValidationError with a
        list of [index, message] pairs when any item fails. """
        self._validate(val)
        if val is None:
            return val
        if not isinstance(val, list):
            raise ValidationError("Has to be list")
        errors = []
        rval = []
        # FIX: use enumerate - the old ``val.index(item)`` always reported
        # the first occurrence's index for duplicate items (and was O(n)
        # per error).
        for index, item in enumerate(val):
            try:
                rval.append(self.item_type.deserialize(item))
            except ValidationError as e:
                errors.append([index, e.message])
        if errors:
            raise ValidationError(errors)
        return rval
    def get_schema(self):
        rval = super(ListField, self).get_schema()
        rval["schema"] = self.item_type.get_schema()
        return rval
    def serialize(self, val):
        # FIX: serialize None to None like every other field instead of
        # raising TypeError when iterating it.
        if val is None:
            return None
        return [self.item_type.serialize(item) for item in val]
class ObjectField(BaseField):
    """ Represents a nested document/mapping of primitives. Serialized into a dict.
    schema (class):
        schema to be used for validation of the nested document, it does not have to be Schema subclass - just a
        collection of fields
    ObjectField can be declared via two different ways.
    First, if there is a reusable schema defined elsewhere:
    >>> class Sample(Schema):
    >>>     object_field = ObjectField(ExternalSchema, required=False, description="Zen")
    Second, if the field is supposed to have a unique custom schema:
    >>> class Sample(Schema):
    >>>     object_field = ObjectField(required=False, description="Zen", schema={
    >>>         "foo": StringField()
    >>>     })
    """
    verbose_name = "dict"
    def __init__(self, schema, **kwargs):
        super(ObjectField, self).__init__(**kwargs)
        # Normalize the declaration into a Schema subclass: a plain dict of
        # fields or a non-Schema class is wrapped into a throwaway Schema
        # subclass before instantiation.
        if isinstance(schema, dict):
            class Tmp(Schema):
                pass
            for key, value in schema.iteritems():
                setattr(Tmp, key, value)
            schema = Tmp
        elif inspect.isclass(schema) and not issubclass(schema, Schema):
            class Tmp(schema, Schema):
                pass
            schema = Tmp
        self._schema = schema()
    def deserialize(self, val):
        self._validate(val)
        if val is None:
            return val
        # Delegate nested validation/conversion to the wrapped schema.
        return self._schema.deserialize(val)
    def get_schema(self):
        return {
            "type": self.verbose_name,
            "schema": self._schema.get_schema()
        }
    def serialize(self, val):
        return self._schema.serialize(val)
class Schema(object):
    """ Base class for containers that would hold one or many fields.
    it has one class attribute that may be used to alter schema's validation flow
    has_additional_fields (bool = False)
        If *True* it shall be possible to have extra fields inside input data that will not be validated
    NOTE: when defining schemas do not use any of the following reserved keywords:
    - find_fields
    - deserialize
    - get_schema
    - serialize
    - has_additional_fields
    """
    has_additional_fields = False
    def __init__(self, validate_required_constraint=True, with_errors=True):
        self._required_fields = set()
        self._defaults = {}
        self._validate_required_constraint, self._with_errors = validate_required_constraint, with_errors
        self.fields = {}
        # Collect every BaseField declared on the class; each instance gets
        # its own copy so per-instance tweaks don't leak between schemas.
        for field_name in dir(self):
            field = getattr(self, field_name)
            if not isinstance(field, BaseField):
                continue
            self._add_field(field_name, copy(field))
    def _add_field(self, field_name, field):
        # Register the field and index its required/default properties for
        # fast access during deserialization.
        setattr(self, field_name, field)
        self.fields[field_name] = field
        if isinstance(field, BaseField) and field.required:
            self._required_fields.add(field_name)
        if isinstance(field, BaseSimpleField) and field.default is not None:
            self._defaults[field_name] = field.default
    def find_fields(self, **kwargs):
        """ Returns a set of fields where each field contains one or more specified keyword arguments """
        rval = set()
        for key, value in kwargs.iteritems():
            for field_name, field in self.fields.iteritems():
                if field.kwargs.get(key) == value:
                    rval.add(field_name)
        return rval
    def deserialize(self, data, validate_required_constraint=True, with_errors=True):
        """ Validates and transforms input data into something that is used withing data access layer
        data (dict)
            Incoming data
        validate_required_constraint (bool = True)
            If *False*, schema will not validate required constraint of the fields inside
        with_errors (bool = True)
            If *False*, all fields that contain errors are silently excluded
        @raises ValidationError
            When one or more fields has errors and *with_errors=True*
        """
        if not isinstance(data, dict):
            raise ValidationError({"__all__": "Has to be a dict"})
        # Start from the declared defaults; incoming values override them.
        transformed = dict(self._defaults)
        errors = defaultdict(list)
        for key, value in data.iteritems():
            field = self.fields.get(key)
            if field is None:
                if self.has_additional_fields:
                    transformed[key] = value
                else:
                    errors["__all__"].append("Field %r is not defined" % key)
                continue
            try:
                transformed[key] = field.deserialize(value)
            except ValidationError, e:
                errors[key].append(e.message)
        if validate_required_constraint:
            for field in self._required_fields:
                if transformed.get(field) is None and field not in errors:
                    errors[field].append("Required field is missing")
        if errors and with_errors:
            raise ValidationError(errors)
        else:
            # with_errors=False: broken fields are silently dropped.
            return transformed
    def get_schema(self):
        """ Returns a JSONizable schema that could be transfered over the wire """
        rval = {}
        for field_name, field in self.fields.iteritems():
            rval[field_name] = field.get_schema()
        if self.has_additional_fields:
            rval["has_additional_fields"] = True
        return rval
def serialize(self, val):
""" Transforms outgoing data into a JSONizable dict """
rval = {}
for key, value in val.iteritems():
field = self.fields.get(key)
if field:
rval[key] = field.serialize(value)
elif self.has_additional_fields:
rval[key] = value
else:
pass
return rval
from msrest.serialization import Model
class QueryResponse(Model):
    """Query result.

    All required parameters must be populated in order to send to Azure.

    :param total_records: Required. Number of total records matching the
     query.
    :type total_records: long
    :param count: Required. Number of records returned in the current
     response. In the case of paging, this is the number of records in the
     current page.
    :type count: long
    :param result_truncated: Required. Indicates whether the query results are
     truncated. Possible values include: 'true', 'false'
    :type result_truncated: str or
     ~azure.mgmt.resourcegraph.models.ResultTruncated
    :param skip_token: When present, the value can be passed to a subsequent
     query call (together with the same query and subscriptions used in the
     current request) to retrieve the next page of data.
    :type skip_token: str
    :param data: Required. Query output in tabular format.
    :type data: ~azure.mgmt.resourcegraph.models.Table
    :param facets: Query facets.
    :type facets: list[~azure.mgmt.resourcegraph.models.Facet]
    """

    _validation = {
        'total_records': {'required': True},
        'count': {'required': True},
        'result_truncated': {'required': True},
        'data': {'required': True},
    }

    _attribute_map = {
        'total_records': {'key': 'totalRecords', 'type': 'long'},
        'count': {'key': 'count', 'type': 'long'},
        'result_truncated': {'key': 'resultTruncated', 'type': 'ResultTruncated'},
        'skip_token': {'key': '$skipToken', 'type': 'str'},
        'data': {'key': 'data', 'type': 'Table'},
        'facets': {'key': 'facets', 'type': '[Facet]'},
    }

    def __init__(self, **kwargs):
        super(QueryResponse, self).__init__(**kwargs)
        self.total_records = kwargs.get('total_records', None)
        self.count = kwargs.get('count', None)
        self.result_truncated = kwargs.get('result_truncated', None)
        self.skip_token = kwargs.get('skip_token', None)
        self.data = kwargs.get('data', None)
        self.facets = kwargs.get('facets', None)
from msrest.serialization import Model
class QueryResponse(Model):
    """Query result.

    All required parameters must be populated in order to send to Azure.

    :param total_records: Required. Number of total records matching the
     query.
    :type total_records: long
    :param count: Required. Number of records returned in the current
     response. In the case of paging, this is the number of records in the
     current page.
    :type count: long
    :param result_truncated: Required. Indicates whether the query results are
     truncated. Possible values include: 'true', 'false'
    :type result_truncated: str or
     ~azure.mgmt.resourcegraph.models.ResultTruncated
    :param skip_token: When present, the value can be passed to a subsequent
     query call (together with the same query and subscriptions used in the
     current request) to retrieve the next page of data.
    :type skip_token: str
    :param data: Required. Query output in tabular format.
    :type data: ~azure.mgmt.resourcegraph.models.Table
    :param facets: Query facets.
    :type facets: list[~azure.mgmt.resourcegraph.models.Facet]
    """

    _validation = {
        'total_records': {'required': True},
        'count': {'required': True},
        'result_truncated': {'required': True},
        'data': {'required': True},
    }

    _attribute_map = {
        'total_records': {'key': 'totalRecords', 'type': 'long'},
        'count': {'key': 'count', 'type': 'long'},
        'result_truncated': {'key': 'resultTruncated', 'type': 'ResultTruncated'},
        'skip_token': {'key': '$skipToken', 'type': 'str'},
        'data': {'key': 'data', 'type': 'Table'},
        'facets': {'key': 'facets', 'type': '[Facet]'},
    }

    def __init__(self, *, total_records: int, count: int, result_truncated, data, skip_token: str=None, facets=None, **kwargs) -> None:
        super(QueryResponse, self).__init__(**kwargs)
        self.total_records = total_records
        self.count = count
        self.result_truncated = result_truncated
        self.skip_token = skip_token
        self.data = data
        self.facets = facets
from redis import StrictRedis
from .aspects import Aspects
import logging
"""The reporter will track and record the amount of time
spent waiting for or using locks.
It must record enough information for another system to optimise
the number of resources over time.
We use ontological tags - they have values i.e. k-v pairs
- store each unique k encountered
- store each unique v encountered for a given k
- store each unique k-v encountered
- store the timing info against this key
where timing info is acquire time, release time, duration, count etc.
"""
# Redis key layout for the tag ontology:
#   tags_collection     - set of every tag key ever seen
#   key_template        - per-key set of observed values
#   key_value_template  - hash of timing/count aspects for a key-value pair
tags_collection = '_TAGS'
key_template = '_TAG_{key}'
key_value_template = '{key}__{value}'
# Single translation table: the separators '.', ':' and '_' all become '-'.
_SAFE_TRANSLATION = str.maketrans('.:_', '---')


def safe(thing):
    """Normalize *thing* into a lowercase, dash-separated tag component.

    Equivalent to stripping, lowercasing and replacing '.', ':' and '_'
    with '-', but done in a single C-level pass via str.translate.
    """
    return str(thing).strip().lower().translate(_SAFE_TRANSLATION)
class RedisReporter:
    """Records lock usage counters in redis, keyed by ontological tags.

    Each report fans out into: the global tag-key set, a per-key value set,
    and a per-(key, value) hash of aspect counters.
    """

    def __init__(self, client=None, bombproof=True, logger=None, **tags):
        # Default client targets redis db 1; `tags` are baseline tags merged
        # into every report. With bombproof=True, reporting failures are
        # logged instead of raised.
        self.client = client or StrictRedis(db=1)
        self.tags = tags
        self.logger = logger or logging.getLogger(__name__)
        self.bombproof = bombproof

    def _clear_all(self):
        # Destructive: wipes the whole reporting database.
        self.client.flushdb()

    def _increment_all(self, tags, aspects):
        """Write every (tag, aspect) counter; returns the number of increments."""
        Aspects.validate(*list(aspects))
        # Track every tag key ever seen.
        self.client.sadd(tags_collection, *list(tags.keys()))
        for key, value in tags.items():
            value = safe(value)
            key = safe(key)
            # Track every value seen for this key.
            lookup_key = key_template.format(key=key)
            self.client.sadd(lookup_key, value)
            store_key = key_value_template.format(key=key, value=value)
            for aspect, incr in aspects.items():
                # Floats need the float variant; everything else goes through
                # the integer increment (a non-numeric incr will error in redis).
                if isinstance(incr, float):
                    self.client.hincrbyfloat(store_key, aspect, incr)
                else:
                    self.client.hincrby(store_key, aspect, incr)
        return len(tags) * len(aspects)

    def report(self, tags, aspects):
        """Merge baseline tags with *tags* and record *aspects*; swallows errors when bombproof."""
        try:
            request = {}
            request.update(self.tags)
            request.update(tags)
            return self._increment_all(request, aspects)
        except Exception:
            if not self.bombproof:
                raise
            else:
                # Deliberate best-effort: reporting must never break the caller.
                self.logger.error('reporting failed')

    def lock_requested(self, **tags):
        self.report(tags, {Aspects.lock_request_count: 1})

    def lock_success(self, wait: float=None, **tags):
        # NOTE(review): a None `wait` reaches hincrby and fails in redis,
        # which bombproof mode silently swallows — confirm callers always pass it.
        self.report(tags, {Aspects.lock_acquire_count: 1, Aspects.lock_acquire_wait: wait})

    def lock_failed(self, **tags):
        self.report(tags, {Aspects.lock_acquire_fail_count: 1})

    def lock_released(self, wait: float=None, **tags):
        self.report(tags, {Aspects.lock_release_count: 1, Aspects.lock_release_wait: wait})
class DummyReporter(RedisReporter):
    """A no-op reporter: exposes the full reporting API but never touches redis."""

    def __init__(self, *args, **kwargs):
        # A truthy placeholder client keeps the parent from opening a real
        # redis connection; it is never used because report() is overridden.
        super().__init__(client=True, *args, **kwargs)

    def report(self, tags, aspects):
        """Discard the report; zero counters written."""
        return 0
from .exceptions import RequirementNotMet
from .potential import Potential
import random
class Requirement:
    """A group of Potentials of which at least ``need`` must be fulfilled.

    Indexing, iteration and ``len`` all operate on the *fulfilled* potentials'
    underlying items.
    """

    def __init__(self, *potentials, need=None, **params):
        # `need` defaults to 1; the full option dict is kept so relevant
        # entries can be forwarded when wrapping raw items in Potential.
        self.options = dict(need=need or 1)
        self.options.update(params)
        self.need = self.options['need']
        self._potentials = []
        # None: undecided, True: fulfilled, False: rejected.
        self._state = None
        for p in potentials:
            self.add_potential(p)

    def __getitem__(self, item):
        return self.fulfilled[item].item

    def __len__(self):
        return len(self.fulfilled)

    def __iter__(self):
        return (item.item for item in self.fulfilled)

    def add_potential(self, p):
        """Register *p*, wrapping raw items in a Potential with the shared options."""
        if not isinstance(p, Potential):
            opts = {k: v for k, v in self.options.items() if k in {
                'key_gen',
                'tag_gen',
                'tags',
            }}
            p = Potential(p, **opts)
        self._potentials.append(p)
        return self

    @property
    def is_fulfilled(self):
        return self._state is True

    @property
    def is_rejected(self):
        return self._state is False

    @property
    def potentials(self):
        return self._potentials

    def prioritised_potentials(self, known_locked):
        """Sort potentials to improve probability of successful lock

        currently: [untried, known_locked]
        """
        known_locked = set(known_locked)
        untried = []
        already_locked = []
        for p in self.potentials:
            if p.key in known_locked:
                already_locked.append(p)
            else:
                untried.append(p)
        # Shuffle the untried ones to spread contention between competing lockers.
        random.shuffle(untried)
        return untried + already_locked

    @property
    def fulfilled(self):
        return [p for p in self._potentials if p.is_fulfilled]

    def count(self):
        """Return the ``(fulfilled, rejected)`` tally across all potentials."""
        fulfilled = sum(1 for p in self._potentials if p.is_fulfilled)
        rejected = sum(1 for p in self._potentials if p.is_rejected)
        return fulfilled, rejected

    def validate(self):
        """Settle this requirement's state; raise when it can no longer be met."""
        fulfilled, rejected = self.count()
        if fulfilled >= self.need:
            self._state = True
        else:
            remaining = len(self._potentials) - rejected
            if remaining < self.need:
                self._state = False
                # right now, requirements are 'AND' (mandatory ... clue is in the name)
                raise RequirementNotMet(f'{remaining} potentials, (need {self.need})')
        return self

    def reset(self):
        """Forget this requirement's state and reset every potential."""
        self._state = None
        for p in self.potentials:
            p.reset()
        return self
import builtins
from ast import NameConstant
from collections import defaultdict, OrderedDict
from threading import Lock
from typing import Dict, Any, Set, Sequence
from .utils import reverse_mapping
from .wrappers import Wrapper, UnifiedImport, PatternAssignment
from .renderer import Renderer
from .exceptions import ResourceError, SemanticError, ExceptionWrapper
ScopeDict = Dict[str, Wrapper]
class Thunk:
    """Abstract lazily-resolved value; concrete thunks implement :meth:`match`."""

    def match(self, name):
        # Resolve the value bound to *name*; subclasses must override.
        raise NotImplementedError
class ValueThunk(Thunk):
def __init__(self, value):
assert not isinstance(value, Thunk)
self._value = value
self.ready = True
def match(self, name):
return self._value
class NodeThunk(Thunk):
    """A thunk that renders a config statement lazily, at most once."""

    def __init__(self, statement):
        self.lock = Lock()
        self.statement = statement
        self._value = None
        self.ready = False

    @staticmethod
    def _match(name, pattern):
        """Yield ``(matched, unpacking_path)`` pairs for *name* against *pattern*.

        The path is a list of ``(index, min_size, max_size)`` steps describing
        how to drill into the rendered tuple value.
        """
        if isinstance(pattern, str):
            # Leaf: a plain target name, no unpacking needed.
            yield name == pattern, []
        else:
            assert isinstance(pattern, tuple)
            size = len(pattern)
            for position, sub_pattern in enumerate(pattern):
                step = position, size, size
                for matched, path in NodeThunk._match(name, sub_pattern):
                    yield matched, [step] + path

    def set(self, value):
        """Store the rendered *value*; allowed exactly once."""
        assert not self.ready
        self._value = value
        self.ready = True

    def match(self, name):
        assert self.ready
        value = self._value
        # TODO: probably need a subclass
        if not isinstance(self.statement, PatternAssignment):
            return value
        pattern = self.statement.pattern
        if isinstance(pattern, str):
            return value
        for matched, path in self._match(name, pattern):
            if matched:
                # Walk the unpacking path, checking sizes like real tuple unpacking.
                for index, min_size, max_size in path:
                    size = len(value)
                    if size < min_size:
                        raise ValueError('not enough values to unpack (expected %d)' % max_size)
                    if size > max_size:
                        raise ValueError('too many values to unpack (expected %d)' % max_size)
                    value = value[index]
                return value
        # unreachable: the name was bound by this very statement
        assert False
class Builtins(dict):
    """The root scope: python builtins merged with user-supplied injections."""

    def __init__(self, injections: dict):
        namespace = dict(vars(builtins))
        clashing = set(namespace) & set(injections)
        if clashing:
            raise SemanticError('Some injections clash with builtins: ' + str(clashing))
        namespace.update(injections)
        super().__init__(namespace)

    def __getitem__(self, name):
        # Report missing names as config-level resource errors.
        if name not in self:
            raise ResourceError('"%s" is not defined.' % name) from None
        return super().__getitem__(name)
class Scope(OrderedDict):
    """An ordered name -> Thunk mapping with a parent scope for fallback lookup.

    Thunks are rendered lazily and exactly once; after the first render the
    scope is "populated" and may no longer be redefined.
    """

    def __init__(self, parent):
        super().__init__()
        self.parent = parent
        # Statements are shared between names bound to the same definition.
        self._statement_to_thunk = {}
        self._populated = False
        self._updated = False

    def check_populated(self):
        # Guard: once any thunk has been rendered, redefinition is forbidden.
        if self._populated:
            raise RuntimeError('The scope has already been populated with live objects. Overwriting them might cause '
                               'undefined behaviour. Please, create another instance of ResourceManager.')

    def get_name_to_statement(self):
        """Return a mapping from each defined name to its source statement."""
        statements = {v: k for k, v in self._statement_to_thunk.items()}
        return {name: statements[thunk] for name, thunk in self.items()}

    def _get_leave_time(self, parents: Dict[Wrapper, Set[Wrapper]], entry_points: Sequence[str]):
        """Topologically order the names reachable from *entry_points*.

        Returns ``(names, leave_time)`` where ``leave_time[name]`` is the
        DFS finish order (dependencies first) used to sort rendered output.
        """
        def mark_name(name):
            nonlocal current
            if name not in leave_time:
                leave_time[name] = current
                current += 1

        def visit_parents(node):
            visited.add(node)
            for parent in parents[node]:
                find_leave_time(parent)

        def find_leave_time(node):
            if node in visited:
                return
            visit_parents(node)
            for name in statements[node]:
                mark_name(name)

        names = self.get_name_to_statement()
        statements = reverse_mapping(names)
        if entry_points is None:
            entry_points = list(names)
        else:
            delta = set(entry_points) - set(names)
            if delta:
                raise ValueError(f'The names {delta} are not defined, and cannot be used as entry points.')

        leave_time = {}
        visited = set()
        current = 0
        # we can't just visit the first-level nodes because some of them may have several names
        # we need to drop such cases
        for n in entry_points:
            visit_parents(names[n])
            mark_name(n)
        names = {n: names[n] for n in leave_time}
        return names, leave_time

    def render(self, parents: Dict[Wrapper, Set[Wrapper]], entry_points: Sequence[str] = None):
        """Yield source lines re-creating this scope: imports first, then definitions."""
        if self._updated:
            raise RuntimeError('The scope has already been updated by live objects that cannot be rendered properly.')
        # grouping imports
        names, order = self._get_leave_time(parents, entry_points)
        groups = defaultdict(list)
        for name, statement in names.items():
            groups[statement].append(name)
        import_groups, imports, definitions = defaultdict(list), [], []
        # Sort definitions by the earliest leave time among their names.
        for statement, names in sorted(groups.items(), key=lambda x: min(order[n] for n in x[1])):
            pair = sorted(names), statement
            if isinstance(statement, UnifiedImport):
                if statement.root:
                    # `from <root> import ...` — group by (root, relative dots).
                    import_groups[statement.root, statement.dots].append(pair)
                else:
                    imports.append(pair)
            else:
                definitions.append(pair)

        for names, statement in imports:
            yield statement.to_str(names)
        if imports:
            yield ''

        for group in import_groups.values():
            # Merge same-module imports into one `from x import a, b` line.
            names, statement = group[0]
            result = statement.to_str(names)
            for names, statement in group[1:]:
                assert len(names) == 1
                result += ', ' + statement.import_what(names[0])
            yield result
        if import_groups or imports:
            yield '\n'

        for names, statement in definitions:
            yield statement.to_str(names)

    def _set_thunk(self, name, thunk):
        # Bypass the disabled public __setitem__.
        super().__setitem__(name, thunk)

    def add_value(self, name, value):
        """Bind *name* to an already-computed value."""
        assert name not in self
        self._set_thunk(name, ValueThunk(value))

    # TODO: unify these functions
    def update_values(self, values: dict):
        """Bind live objects; the scope can no longer be rendered afterwards."""
        self._updated = True
        self.check_populated()
        for name, value in values.items():
            # NOTE(review): ast nodes hash by identity, so this lookup
            # presumably never hits — each value gets a fresh thunk; confirm.
            statement = NameConstant(value)
            if statement not in self._statement_to_thunk:
                self._statement_to_thunk[statement] = ValueThunk(value)
            self._set_thunk(name, self._statement_to_thunk[statement])

    def update_statements(self, items):
        """Bind (name, statement) pairs, sharing one thunk per statement."""
        self.check_populated()
        for name, statement in items:
            if statement not in self._statement_to_thunk:
                self._statement_to_thunk[statement] = NodeThunk(statement)
            self._set_thunk(name, self._statement_to_thunk[statement])

    def __setitem__(self, key, value):
        # Direct assignment is forbidden; use add_value / update_* instead.
        raise NotImplementedError

    def __getitem__(self, name: str):
        # Fall back to the parent scope for undefined names.
        if name not in self:
            return self.parent[name]
        thunk = super().__getitem__(name)
        if thunk.ready:
            return thunk.match(name)

        assert isinstance(thunk, NodeThunk)
        # Double-checked locking: render each statement exactly once.
        with thunk.lock:
            if not thunk.ready:
                self._populated = True
                thunk.set(Renderer.render(thunk.statement, self))
        return thunk.match(name)
class ScopeWrapper(Dict[str, Any]):
    """Adapts a Scope for use as the globals mapping of ``eval``."""

    def __init__(self, scope):
        super().__init__()
        self.scope = scope

    def __getitem__(self, name):
        try:
            return self.scope[name]
        except KeyError as e:
            # this is needed because KeyError is converted to NameError by `eval`
            raise ExceptionWrapper(e) from e
        except ResourceError:
            # Not defined in the scope: fall through to the plain dict part.
            pass

        if name not in self:
            raise NameError(f'The name "{name}" is not defined.')
        return super().__getitem__(name)

    def __contains__(self, name):
        return name in self.scope or super().__contains__(name)
import os
from collections import OrderedDict, Counter
from pathlib import Path
from typing import Union, Dict, Any, Sequence
from .semantics import Semantics
from .exceptions import ResourceError, ExceptionWrapper, SemanticError, ConfigImportError
from .scope import Scope, Builtins, ScopeWrapper
from .parser import parse_file, parse_string, flatten_assignment
PathLike = Union[Path, str]
class ResourceManager:
    """
    A config interpreter.

    Parameters
    ----------
    shortcuts: dict, optional
        a dict that maps keywords to paths. It is used to resolve paths during import.
    injections: dict, optional
        a dict with default values that will be used in case the config doesn't define them.
    """
    # restricting setattr to these names
    __slots__ = '_shortcuts', '_imported_configs', '_scope', '_node_parents'

    def __init__(self, shortcuts: Dict[str, PathLike] = None, injections: Dict[str, Any] = None):
        self._shortcuts = shortcuts or {}
        # Cache of already-imported config paths (also breaks import cycles).
        self._imported_configs = {}
        self._scope = Scope(Builtins(injections or {}))
        self._node_parents = {}

    @classmethod
    def read_config(cls, path: PathLike, shortcuts: Dict[str, PathLike] = None, injections: Dict[str, Any] = None):
        """
        Import the config located at `path` and return a ResourceManager instance.

        Also this method adds a `__file__ = pathlib.Path(path)` value to the global scope.

        Parameters
        ----------
        path: str
            path to the config to import
        shortcuts: dict, optional
            a dict that maps keywords to paths. It is used to resolve paths during import.
        injections: dict, optional
            a dict with default values that will be used in case the config doesn't define them.

        Returns
        -------
        resource_manager: ResourceManager
        """
        key = '__file__'
        injections = dict(injections or {})
        if key in injections:
            raise ValueError('The "%s" key is not allowed in "injections".' % key)
        injections[key] = Path(cls._standardize_path(path))
        return cls(shortcuts, injections).import_config(path)

    @classmethod
    def read_string(cls, source: str, shortcuts: Dict[str, PathLike] = None, injections: Dict[str, Any] = None):
        """
        Interpret the `source` and return a ResourceManager instance.

        Parameters
        ----------
        source: str
        shortcuts: dict, optional
            a dict that maps keywords to paths. It is used to resolve paths during import.
        injections: dict, optional
            a dict with default values that will be used in case the config doesn't define them.

        Returns
        -------
        resource_manager: ResourceManager
        """
        return cls(shortcuts, injections).string_input(source)

    def import_config(self, path: PathLike):
        """Import the config located at `path`."""
        self._update_resources(self._import(path))
        return self

    def string_input(self, source: str):
        """Interpret the `source`."""
        self._update_resources(self._get_resources(*parse_string(source)))
        return self

    def update(self, **values: Any):
        """Update the scope by `values`."""
        self._scope.update_values(values)
        return self

    def render_config(self, entry_points: Union[Sequence[str], str] = None) -> str:
        """
        Generate a string containing definitions of resources in the current scope.

        Parameters
        ----------
        entry_points
            the definitions that should be kept (along with their dependencies).
            If None - all the definitions are rendered.
        """
        if isinstance(entry_points, str):
            entry_points = [entry_points]
        return '\n'.join(self._scope.render(self._node_parents, entry_points)).strip() + '\n'

    def save_config(self, path: str, entry_points: Union[Sequence[str], str] = None):
        """Render the config and save it to `path`. See `render_config` for details."""
        with open(path, 'w') as file:
            file.write(self.render_config(entry_points))

    def __getattr__(self, name: str):
        # Attribute access on the manager resolves config resources.
        try:
            return self.get_resource(name)
        except ResourceError:
            raise AttributeError('"%s" is not defined.' % name) from None

    def __getitem__(self, name: str):
        # Item access mirrors attribute access, but raises KeyError.
        try:
            return self.get_resource(name)
        except ResourceError:
            raise KeyError('"%s" is not defined.' % name) from None

    def get_resource(self, name: str):
        """Resolve (and lazily render) the resource bound to `name`."""
        try:
            return self._scope[name]
        except ExceptionWrapper as e:
            # Unwrap exceptions smuggled through the scope machinery.
            raise e.exception from None

    def eval(self, expression: str):
        """Evaluate the given `expression`."""
        try:
            return eval(expression, ScopeWrapper(self._scope))
        except ExceptionWrapper as e:
            raise e.exception from None

    def _update_resources(self, scope: OrderedDict):
        """Merge new statements into the scope after semantic analysis."""
        self._scope.check_populated()
        updated_scope = self._scope.get_name_to_statement()
        updated_scope.update(scope)
        # Re-analyze the full picture to rebuild the dependency graph.
        self._node_parents = Semantics.analyze(updated_scope, self._scope.parent)
        self._scope.update_statements(scope.items())

    @staticmethod
    def _standardize_path(path: PathLike) -> str:
        # Canonicalize: expand ~ and resolve symlinks so the import cache
        # keys by the real file.
        path = str(path)
        path = os.path.expanduser(path)
        path = os.path.realpath(path)
        return path

    def _import(self, path: str) -> OrderedDict:
        """Parse and resolve the config at `path`, with caching."""
        path = self._standardize_path(path)
        if path in self._imported_configs:
            return self._imported_configs[path]
        # avoiding cycles
        self._imported_configs[path] = {}

        result = self._get_resources(*parse_file(path))
        self._imported_configs[path] = result
        return result

    def _get_resources(self, parents, imports, definitions) -> OrderedDict:
        """Combine starred-import parents, single imports and definitions into one scope."""
        parent_scope = OrderedDict()
        for parent in parents:
            parent_scope.update(self._import(parent.get_path(self._shortcuts)))

        scope = []
        for name, node in imports:
            if node.potentially_config():
                # The import may target another config file: inline the
                # referenced definition if so, otherwise keep it as-is.
                try:
                    local = self._import(node.get_path(self._shortcuts))
                    what, = node.what
                    if what not in local:
                        raise NameError('"%s" is not defined in the config it is imported from.\n' % what +
                                        '  at %d:%d in %s' % node.position)
                    node = local[what]
                except ConfigImportError:
                    pass
            scope.append((name, node))
        scope.extend(definitions)

        duplicates = [
            name for name, count in
            Counter(sum([flatten_assignment(pattern) for pattern, _ in scope], [])).items() if count > 1
        ]
        if duplicates:
            source_path = (imports or definitions)[0][1].source_path
            raise SemanticError('Duplicate definitions found in %s:\n %s' % (source_path, ', '.join(duplicates)))

        # Local names shadow those inherited from parent configs.
        final_scope = OrderedDict(parent_scope.items())
        final_scope.update(scope)
        return final_scope

    def __dir__(self):
        # Expose config names for interactive tab-completion.
        return list(set(self._scope.keys()) | set(super().__dir__()))

    def _ipython_key_completions_(self):
        return self._scope.keys()

    def __setattr__(self, name, value):
        try:
            super().__setattr__(name, value)
        except AttributeError:
            # __slots__ rejects unknown names; rephrase the error for users.
            raise AttributeError('ResourceManager\'s attribute "%s" is read-only.' % name) from None
# Convenient module-level aliases for the two main entry points.
read_config = ResourceManager.read_config
read_string = ResourceManager.read_string
import bisect
from inspect import Parameter, Signature
from io import BytesIO
from tokenize import tokenize
from .visitor import Visitor
from .wrappers import *
def throw(message, position):
    """Raise a SyntaxError annotated with a ``(line, column, file)`` position."""
    line, column, source = position
    raise SyntaxError('%s\n  at %d:%d in %s' % (message, line, column, source))
def get_substring(lines: Sequence[str], start_line: int, start_col: int, stop_line: int = None,
                  stop_col: int = None, lstrip: bool = True, rstrip: bool = True, keep_line: bool = True) -> str:
    """Cut the span (start_line, start_col)..(stop_line, stop_col) out of *lines*.

    Line numbers are 1-based. Leading/trailing comment and blank lines are
    dropped when ``lstrip``/``rstrip`` are set; with ``keep_line``, more than
    one trailing blank line is preserved as a single final newline.
    """
    selected = list(lines[start_line - 1:stop_line])
    # Truncate the last line before shifting the first: for a single-line
    # span both slices apply to the same string, in this exact order.
    selected[-1] = selected[-1][:stop_col]
    selected[0] = selected[0][start_col:]

    blank_tail = 0
    if lstrip:
        while True:
            head = selected[0].strip()
            if head and not head.startswith('#'):
                break
            selected.pop(0)
    if rstrip:
        while True:
            tail = selected[-1].strip()
            if tail and not tail.startswith('#'):
                break
            if not tail:
                blank_tail += 1
            selected.pop()

    body = '\n'.join(selected).strip()
    if keep_line and blank_tail > 1:
        body += '\n'
    return body
def tokenize_string(source):
    """Tokenize *source* as Python code, returning a token iterator."""
    buffer = BytesIO(source.encode())
    return tokenize(buffer.readline)
def flatten_assignment(pattern):
    """Collect every target name from a (possibly nested) assignment pattern."""
    if isinstance(pattern, str):
        return [pattern]
    # Tuples of sub-patterns are flattened depth-first, left to right.
    return [name for entry in pattern for name in flatten_assignment(entry)]
class Normalizer(Visitor):
    """Base AST visitor that converts python nodes into config wrapper objects."""

    def __init__(self, source_path):
        self.source_path = source_path

    def get_position(self, node: ast.AST):
        """Return the ``(line, column, file)`` triple for error reporting."""
        return node.lineno, node.col_offset, self.source_path

    def generic_visit(self, node, *args, **kwargs):
        # Anything without an explicit visit_* handler is unsupported syntax.
        throw('This syntactic structure is not supported.', self.get_position(node))

    def _prepare_function(self, node: ast.FunctionDef):
        """Convert a FunctionDef into ``(name, Function)``.

        The function body must be a sequence of bindings/assertions ending in
        a single return statement.
        """
        *raw_bindings, ret = node.body
        if not isinstance(ret, ast.Return):
            throw('Functions must end with a return statement.', self.get_position(ret))

        # docstring
        docstring = None
        if raw_bindings and isinstance(raw_bindings[0], ast.Expr) and isinstance(raw_bindings[0].value, ast.Str):
            docstring, raw_bindings = raw_bindings[0].value.s, raw_bindings[1:]

        # bindings
        bindings, assertions = [], []
        for statement, stop in zip(raw_bindings, node.body[1:]):
            value = LocalNormalizer(self.source_path).visit(statement)
            if isinstance(statement, ast.Assert):
                assertions.extend(value)
            else:
                bindings.extend(value)

        # parameters
        args = node.args
        parameters = []
        # TODO: support
        if len(getattr(args, 'posonlyargs', [])) > 0:
            throw('Positional-only arguments are not supported.', self.get_position(node))
        # Pad defaults on the left: only the trailing positional args have them.
        for arg, default in zip(args.args, [None] * (len(args.args) - len(args.defaults)) + args.defaults):
            if default is None:
                default = Parameter.empty
            else:
                default = ExpressionWrapper(default, self.get_position(default))
            parameters.append(Parameter(arg.arg, Parameter.POSITIONAL_OR_KEYWORD, default=default))
        if args.vararg is not None:
            parameters.append(Parameter(args.vararg.arg, Parameter.VAR_POSITIONAL))
        # Keyword-only defaults align one-to-one with kwonlyargs (None = no default).
        for arg, default in zip(args.kwonlyargs, args.kw_defaults):
            if default is None:
                default = Parameter.empty
            else:
                default = ExpressionWrapper(default, self.get_position(default))
            parameters.append(Parameter(arg.arg, Parameter.KEYWORD_ONLY, default=default))
        if args.kwarg is not None:
            parameters.append(Parameter(args.kwarg.arg, Parameter.VAR_KEYWORD))

        # decorators
        decorators = [ExpressionWrapper(decorator, self.get_position(decorator)) for decorator in node.decorator_list]

        return node.name, Function(
            Signature(parameters), docstring, bindings, ExpressionWrapper(ret.value, self.get_position(ret.value)),
            decorators, assertions, node.name, self.get_position(node),
        )
class LocalNormalizer(Normalizer):
    """Normalizes statements that appear inside function bodies."""

    def get_assignment_pattern(self, target):
        """Convert an assignment target into a name or nested tuple of names."""
        assert isinstance(target.ctx, ast.Store)
        if isinstance(target, ast.Name):
            return target.id
        if isinstance(target, ast.Starred):
            throw('Starred unpacking is not supported.', self.get_position(target))

        assert isinstance(target, (ast.Tuple, ast.List))
        return tuple(self.get_assignment_pattern(elt) for elt in target.elts)

    def visit_function_def(self, node: ast.FunctionDef):
        # Nested function definitions become local bindings.
        yield self._prepare_function(node)

    def visit_assert(self, node: ast.Assert):
        yield AssertionWrapper(node, self.get_position(node))

    def visit_assign(self, node: ast.Assign):
        if len(node.targets) != 1:
            throw('Assignments inside functions must have a single target.', self.get_position(node))
        pattern = self.get_assignment_pattern(node.targets[0])
        expression = PatternAssignment(node.value, pattern, self.get_position(node.value))
        # Every name in the pattern shares the same assignment statement.
        for name in flatten_assignment(pattern):
            yield name, expression
class GlobalNormalizer(Normalizer):
    """Normalizes top-level statements, preserving their exact source text."""

    def __init__(self, start, stop, lines, source_path):
        super().__init__(source_path)
        # `start`/`stop` are (line, column) limits of the statement in `lines`.
        self.lines = lines
        self.start = start
        self.stop = stop

    def visit_function_def(self, node: ast.FunctionDef):
        name, func = self._prepare_function(node)
        # body
        body = get_substring(self.lines, *self.start, *self.stop)
        # Split the raw text at the `def` keyword: decorators before, body after.
        for token in tokenize_string(body):
            if token.string == 'def':
                start = get_substring(body.splitlines(), 1, 0, *token.end)
                stop = get_substring(body.splitlines(), *token.end)
                assert stop.startswith(node.name)
                stop = stop[len(node.name):].strip()
                func.body = start, stop
                break
        assert func.body is not None
        yield name, func

    def visit_assign(self, node: ast.Assign):
        position = self.get_position(node.value)
        # Top-level assignments only support plain names as targets.
        for target in node.targets:
            if not isinstance(target, ast.Name):
                throw('This assignment syntax is not supported.', self.get_position(target))
            assert isinstance(target.ctx, ast.Store)

        # Recover the right-hand-side text starting after the last target.
        last_target = node.targets[-1]
        body = get_substring(self.lines, last_target.lineno, last_target.col_offset, *self.stop)
        assert body[:len(last_target.id)] == last_target.id
        body = body[len(last_target.id):].lstrip()
        assert body[0] == '='
        body = body[1:].lstrip()
        expression = ExpressionStatement(node.value, body, position)
        # In `a = b = expr` each target is bound to the same expression.
        for target in node.targets:
            yield target.id, expression

    def visit_import_from(self, node: ast.ImportFrom):
        names = node.names
        root = node.module.split('.')
        position = self.get_position(node)
        if len(names) == 1 and names[0].name == '*':
            # `from x import *` becomes a parent-config reference.
            yield None, ImportStarred(root, node.level, position)
            return

        for alias in names:
            name = alias.asname or alias.name
            # NOTE(review): split(',') looks odd — imported names cannot
            # contain commas, so this presumably always yields [alias.name].
            yield name, UnifiedImport(root, node.level, alias.name.split(','), alias.asname is not None, position)

    def visit_import(self, node: ast.Import):
        position = self.get_position(node)
        for alias in node.names:
            name = alias.asname or alias.name
            yield name, UnifiedImport('', 0, alias.name.split('.'), alias.asname is not None, position)
# need this function, because in >=3.8 the function start is considered from `def` token
# rather then from the first decorator
def find_body_limits(source: str, source_path: str):
    """Yield ``(statement, start, stop)`` text limits for each top-level statement.

    Statements are yielded in reverse source order; each statement's stop is
    the start of the statement that follows it (or the end of the file).
    """
    def _pos(node):
        return node.lineno, node.col_offset

    statements = sorted(ast.parse(source, source_path).body, key=_pos, reverse=True)
    tokens = list(tokenize_string(source))
    if not tokens:
        return

    indices = [t.start for t in tokens]
    stop = tokens[-1].end
    for statement in statements:
        start = _pos(statement)
        if isinstance(statement, ast.FunctionDef) and statement.decorator_list:
            # Rewind the start to the `@` of the first decorator.
            dec = statement.decorator_list[0]
            start = _pos(dec)
            idx = bisect.bisect_left(indices, start)
            token = tokens[idx]
            assert token.start == start
            token = tokens[idx - 1]
            assert token.string == '@'
            start = token.start
        yield statement, start, stop
        stop = start
def parse(source: str, source_path: str):
    """Parse a config source into ``(parents, imports, definitions)``.

    Enforces ordering: starred imports first, then plain imports, then
    definitions.
    """
    lines = tuple(source.splitlines() + [''])
    wrapped = []
    # find_body_limits yields statements in reverse order; restore source order.
    for statement, start, stop in reversed(list(find_body_limits(source, source_path))):
        wrapped.extend(GlobalNormalizer(start, stop, lines, source_path).visit(statement))

    parents, imports, definitions = [], [], []
    for name, w in wrapped:
        if isinstance(w, ImportStarred):
            assert name is None
            if imports or definitions:
                throw('Starred imports are only allowed at the top of the config.', w.position)
            parents.append(w)
        elif isinstance(w, UnifiedImport):
            if definitions:
                throw('Imports are only allowed before definitions.', w.position)
            imports.append((name, w))
        else:
            assert isinstance(w, (Function, ExpressionStatement))
            definitions.append((name, w))

    return parents, imports, definitions
def parse_file(config_path):
    """Read and parse the config located at *config_path*."""
    with open(config_path, 'r') as stream:
        return parse(stream.read(), config_path)
def parse_string(source):
    """Parse a config given directly as a string."""
    return parse(source, '<string input>')
import ast
from typing import Iterable
from ..wrappers import AssertionWrapper, ExpressionStatement
from ..visitor import Visitor
class SemanticVisitor(Visitor):
    """Simple visitor for nodes that don't interact with the scope stack."""

    # utils
    def _visit_sequence(self, sequence: Iterable):
        for item in sequence:
            self.visit(item)

    def _visit_valid(self, value):
        # Visit only when the (optional) AST field is present.
        if value is not None:
            self.visit(value)

    def _ignore_node(self, node):
        pass

    # expressions
    def visit_expression_statement(self, node: ExpressionStatement):
        self.visit(node.expression)

    visit_pattern_assignment = visit_expression_wrapper = visit_expression_statement

    # literals: carry no names, nothing to analyze
    visit_constant = visit_name_constant = visit_ellipsis = visit_bytes = visit_num = visit_str = _ignore_node

    def visit_formatted_value(self, node):
        # Format specs inside f-strings are not supported.
        assert node.format_spec is None
        self.visit(node.value)

    def visit_joined_str(self, node):
        self._visit_sequence(node.values)

    def visit_list(self, node: ast.List):
        assert isinstance(node.ctx, ast.Load)
        self._visit_sequence(node.elts)

    visit_tuple = visit_list

    def visit_set(self, node):
        self._visit_sequence(node.elts)

    def visit_dict(self, node):
        # A None key marks a `**mapping` expansion and has no node to visit.
        self._visit_sequence(filter(None, node.keys))
        self._visit_sequence(node.values)

    # variables
    def visit_starred(self, node: ast.Starred):
        self.visit(node.value)

    # expressions
    def visit_unary_op(self, node: ast.UnaryOp):
        self.visit(node.operand)

    def visit_bin_op(self, node: ast.BinOp):
        self.visit(node.left)
        self.visit(node.right)

    def visit_bool_op(self, node: ast.BoolOp):
        self._visit_sequence(node.values)

    def visit_compare(self, node: ast.Compare):
        self.visit(node.left)
        self._visit_sequence(node.comparators)

    def visit_call(self, node: ast.Call):
        self.visit(node.func)
        self._visit_sequence(node.args)
        self._visit_sequence(node.keywords)
        # starargs/kwargs only exist on pre-3.5 ASTs.
        self._visit_valid(getattr(node, 'starargs', None))
        self._visit_valid(getattr(node, 'kwargs', None))

    def visit_keyword(self, node):
        self.visit(node.value)

    def visit_if_exp(self, node: ast.IfExp):
        self.visit(node.test)
        self.visit(node.body)
        self.visit(node.orelse)

    def visit_attribute(self, node: ast.Attribute):
        assert isinstance(node.ctx, ast.Load)
        self.visit(node.value)

    # subscripting
    def visit_subscript(self, node: ast.Subscript):
        assert isinstance(node.ctx, ast.Load)
        self.visit(node.value)
        self.visit(node.slice)

    def visit_index(self, node):
        self.visit(node.value)

    def visit_slice(self, node):
        self._visit_valid(node.lower)
        self._visit_valid(node.upper)
        self._visit_valid(node.step)

    def visit_ext_slice(self, node):
        self._visit_sequence(node.dims)

    # statements
    def visit_assertion_wrapper(self, node: AssertionWrapper):
        self.visit(node.assertion.test)
        if node.assertion.msg is not None:
            self.visit(node.assertion.msg)

    # imports: introduce names but contain no sub-expressions
    visit_unified_import = _ignore_node
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def launch_gui(csv_name='system_usage_log.csv'):
    """Render an interactive Streamlit dashboard for a system-usage CSV log.

    Parameters:
        csv_name: Path to a CSV file with a 'Time' column plus one numeric
            column per monitored metric (network I/O, CPU, memory, GPU, ...).
    """
    # Allow st.pyplot() to be called without an explicit figure argument.
    st.set_option('deprecation.showPyplotGlobalUse', False)
    # Read the CSV file and index it by timestamp so plots get a time axis.
    data = pd.read_csv(csv_name)
    data['Time'] = pd.to_datetime(data['Time'])
    data.set_index('Time', inplace=True)
    st.title('System Usage Log')
    st.subheader('Network I/O, CPU, Memory, and GPU Usage over Time')
    # Select the chart type
    chart_type = st.selectbox('Select Chart Type', [
        'Line Chart', 'Area Chart', 'Bar Chart', 'Pie Chart', 'Scatter Plot', 'Heatmap', 'Violin Plot'])
    if chart_type not in ['Violin Plot']:
        # Select the columns to display, then plot them.
        columns = st.multiselect('Select Columns', list(data.columns))
        if columns:
            if chart_type == 'Line Chart':
                data[columns].plot(kind='line')
            elif chart_type == 'Area Chart':
                data[columns].plot(kind='area', stacked=False)
            elif chart_type == 'Bar Chart':
                data[columns].plot(kind='bar', stacked=False)
            elif chart_type == 'Pie Chart':
                # A pie chart cannot show a time series, so chart the
                # average value of each selected column instead.
                average_values = data[columns].mean()
                plt.pie(average_values, labels=average_values.index, autopct='%1.1f%%')
            elif chart_type == 'Scatter Plot':
                for column in columns:
                    plt.scatter(data.index, data[column], label=column)
            elif chart_type == 'Heatmap':
                fig, ax = plt.subplots(figsize=(10, 6))
                sns.heatmap(data[columns].corr(), annot=True,
                            fmt=".2f", cmap='coolwarm', ax=ax)
                plt.title('Correlation Heatmap')
            # Only the scatter plot labels its artists by hand; pandas adds
            # its own legend for line/area/bar, and pie/heatmap have no
            # labeled artists (a bare plt.legend() there emits a
            # "No artists with labels found" warning).
            if chart_type == 'Scatter Plot':
                plt.legend()
            # Display the plot using Streamlit
            st.pyplot()
    elif chart_type == 'Violin Plot':
        fig, ax = plt.subplots()
        x_axis = st.selectbox('Select Variable', list(data.columns))
        y_axis = st.selectbox('Select Class', list(data.columns))
        if x_axis and y_axis:
            sns.violinplot(x=data[x_axis], y=data[y_axis], ax=ax)
            plt.xlabel(x_axis)
            plt.ylabel(y_axis)
            plt.title('Violin Plot')
            st.pyplot()
    st.subheader('Summary')
    # Per-column min/max/mean over the whole log.
    statistics = {
        'Minimum': data.min(),
        'Maximum': data.max(),
        'Average': data.mean()
    }
    st.write(pd.DataFrame(statistics))
    # Display the raw data
    st.subheader('Raw Data')
    st.write(data)
if __name__ == '__main__':
    # Launch the dashboard with the default CSV path when run as a script.
    launch_gui()
# type annotations
from __future__ import annotations
from typing import Dict
# standard libs
import re
from abc import ABC
# internal libs
from ..core.extern import ExternalMetric
# public interface
__all__ = ['RocmMetric', 'RocmPercent', 'RocmMemory', 'RocmPower', 'RocmTemperature', ]
class RocmMetric(ExternalMetric, ABC):
    """Abstract base for GPU metrics collected by running the external
    `rocm-smi` tool; concrete subclasses provide the `_cmd` command line
    and a `parse_text` parser for its CSV output."""
class RocmPercent(RocmMetric):
    """Parse rocm-smi for GPU overall usage as a percentage."""

    _cmd: str = 'rocm-smi --showuse --csv'
    # \d+ rather than \d: card indices exceed one digit on nodes with >=10 GPUs.
    _pattern: re.Pattern = re.compile(r'^card(\d+),(\d+(?:\.\d+)?)')

    @classmethod
    def parse_text(cls, block: str) -> Dict[str, Dict[int, float]]:
        """Parse `rocm-smi` output.

        Parameters:
            block: Raw CSV text emitted by `rocm-smi --showuse --csv`.

        Returns:
            Mapping {'percent': {gpu_index: usage_percent}}.

        Raises:
            RuntimeError: If any data line does not match the expected CSV shape.
        """
        try:
            data = {}
            lines = block.strip().split('\n')  # NOTE: lines[0] == 'device,GPU use (%)'
            for line in lines[1:]:
                index, percent = cls._pattern.match(line).groups()
                data[int(index)] = float(percent)
            return {'percent': data}
        except Exception as error:
            raise RuntimeError(f'Failed to parse output ({cls._cmd}): {error}') from error
class RocmMemory(RocmMetric):
    """Parse rocm-smi for GPU memory usage as a percentage."""

    _cmd: str = 'rocm-smi --showmemuse --csv'
    # \d+ rather than \d: card indices exceed one digit on nodes with >=10 GPUs.
    _pattern: re.Pattern = re.compile(r'^card(\d+),(\d+(?:\.\d+)?)')

    @classmethod
    def parse_text(cls, block: str) -> Dict[str, Dict[int, float]]:
        """Parse `rocm-smi` output.

        Parameters:
            block: Raw CSV text emitted by `rocm-smi --showmemuse --csv`.

        Returns:
            Mapping {'memory': {gpu_index: memory_use_percent}}.

        Raises:
            RuntimeError: If any data line does not match the expected CSV shape.
        """
        try:
            data = {}
            lines = block.strip().split('\n')  # lines[0] is the CSV header row
            for line in lines[1:]:
                index, memory = cls._pattern.match(line).groups()
                data[int(index)] = float(memory)
            return {'memory': data}
        except Exception as error:
            raise RuntimeError(f'Failed to parse output ({cls._cmd}): {error}') from error
class RocmTemperature(RocmMetric):
    """Parse rocm-smi for GPU temperature in Celsius."""

    _cmd: str = 'rocm-smi --showtemp --csv'
    # Three temperature sensors per card (edge, junction, memory).
    # \d+ rather than \d: card indices exceed one digit on nodes with >=10 GPUs.
    _pattern: re.Pattern = re.compile(r'^card(\d+),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?)')

    @classmethod
    def parse_text(cls, block: str) -> Dict[str, Dict[int, float]]:
        """Parse `rocm-smi` output.

        Parameters:
            block: Raw CSV text emitted by `rocm-smi --showtemp --csv`.

        Returns:
            Mapping {'temp': {gpu_index: junction_temperature_celsius}}.

        Raises:
            RuntimeError: If any data line does not match the expected CSV shape.
        """
        try:
            data = {}
            lines = block.strip().split('\n')  # lines[0] is the CSV header row
            for line in lines[1:]:
                index, _t_edge, t_junc, _t_mem = cls._pattern.match(line).groups()
                data[int(index)] = float(t_junc)  # TODO: user picks which temp?
            return {'temp': data}
        except Exception as error:
            raise RuntimeError(f'Failed to parse output ({cls._cmd}): {error}') from error
class RocmPower(RocmMetric):
    """Parse rocm-smi for GPU total power draw (in Watts)."""

    _cmd: str = 'rocm-smi --showpower --csv'
    # \d+ rather than \d: card indices exceed one digit on nodes with >=10 GPUs.
    _pattern: re.Pattern = re.compile(r'^card(\d+),(\d+(?:\.\d+)?)')

    @classmethod
    def parse_text(cls, block: str) -> Dict[str, Dict[int, float]]:
        """Parse `rocm-smi` output.

        Parameters:
            block: Raw CSV text emitted by `rocm-smi --showpower --csv`.

        Returns:
            Mapping {'power': {gpu_index: power_draw_watts}}.

        Raises:
            RuntimeError: If any data line does not match the expected CSV shape.
        """
        try:
            data = {}
            lines = block.strip().split('\n')  # lines[0] is the CSV header row
            for line in lines[1:]:
                index, power = cls._pattern.match(line).groups()
                data[int(index)] = float(power)
            return {'power': data}
        except Exception as error:
            raise RuntimeError(f'Failed to parse output ({cls._cmd}): {error}') from error
# type annotations
from __future__ import annotations
from typing import Dict, Type, Optional
# standard libs
import functools
from subprocess import CalledProcessError, check_output
# internal libs
from ..core.extern import ExternalMetric
from .nvidia import NvidiaPercent, NvidiaMemory, NvidiaTemperature, NvidiaPower
from .rocm import RocmPercent, RocmMemory, RocmTemperature, RocmPower
# public interface
__all__ = ['SMIData', ]
class SMIData:
    """High-level interface to an external smi tool for GPU telemetry.

    Detects whether `nvidia-smi` or `rocm-smi` is usable (cached on first
    access) and routes every telemetry query through the matching
    vendor-specific parser class.
    """

    @property
    def percent(self) -> Dict[int, float]:
        """Current percent usage by GPU index."""
        return self.get_telemetry('percent')

    @property
    def memory(self) -> Dict[int, float]:
        """Current memory usage by GPU index."""
        return self.get_telemetry('memory')

    @property
    def power(self) -> Dict[int, float]:
        """Current power consumption by GPU index (in Watts)."""
        return self.get_telemetry('power')

    @property
    def temp(self) -> Dict[int, float]:
        """Current temperature by GPU index (in Celsius)."""
        return self.get_telemetry('temp')

    def get_telemetry(self, metric: str) -> Dict[int, float]:
        """Current usage of `metric` by GPU index.

        Raises:
            KeyError: If `metric` is not one of 'percent', 'memory',
                'power', 'temp' (indexing gives a clearer error than
                chained `.get` calls returning None).
        """
        provider = self.provider_map[self.provider][metric]
        return provider.from_cmd().data.get(metric)

    @functools.cached_property
    def provider_map(self) -> Dict[str, Dict[str, Type[ExternalMetric]]]:
        """Map of query providers by vendor and resource type."""
        return {
            'nvidia': {
                'percent': NvidiaPercent,
                'memory': NvidiaMemory,
                'power': NvidiaPower,
                'temp': NvidiaTemperature
            },
            'rocm': {
                'percent': RocmPercent,
                'memory': RocmMemory,
                'power': RocmPower,
                'temp': RocmTemperature
            }
        }

    @functools.cached_property
    def provider(self) -> str:
        """Either 'nvidia' or 'rocm' if available.

        Raises:
            RuntimeError: If neither smi tool can be found and run.
        """
        # Compare against None explicitly: an smi tool that prints nothing
        # (empty string) still counts as present.
        if self._check_nvidia() is not None:
            return 'nvidia'
        if self._check_rocm() is not None:
            return 'rocm'
        raise RuntimeError('Neither `nvidia-smi` nor `rocm-smi` found')

    def _check_nvidia(self) -> Optional[str]:
        """Default `nvidia-smi` output, or None if it cannot be run."""
        return self._check_command('nvidia-smi')

    def _check_rocm(self) -> Optional[str]:
        """Default `rocm-smi` output, or None if it cannot be run."""
        return self._check_command('rocm-smi')

    @staticmethod
    def _check_command(name: str) -> Optional[str]:
        """Return default output of command given `name`, None if unusable.

        Treats both a missing binary and a binary that exits non-zero
        (e.g. smi installed but no driver loaded) as "unusable", so
        provider detection can fall through to the other vendor.
        """
        try:
            return check_output([name, ]).decode().strip()
        except (FileNotFoundError, CalledProcessError):
            return None
# type annotations
from __future__ import annotations
from typing import Dict
# standard libs
from abc import ABC
# internal libs
from ..core.extern import ExternalMetric
# public interface
__all__ = ['NvidiaMetric', 'NvidiaPercent', 'NvidiaMemory', 'NvidiaPower', 'NvidiaTemperature', ]
class NvidiaMetric(ExternalMetric, ABC):
    """Abstract base for GPU metrics collected by running the external
    `nvidia-smi` tool; concrete subclasses provide the `_cmd` command line
    and a `parse_text` parser for its CSV output."""
class NvidiaPercent(NvidiaMetric):
    """Parse nvidia-smi for overall percent utilization."""

    _cmd: str = 'nvidia-smi --format=csv,noheader,nounits --query-gpu=index,utilization.gpu -c1'

    @classmethod
    def parse_text(cls, block: str) -> Dict[str, Dict[int, float]]:
        """Parse `nvidia-smi` output into {'percent': {gpu_index: percent}}."""
        try:
            parsed = {}
            # Headerless CSV: each row is "<index>, <utilization>".
            for row in block.strip().split('\n'):
                index, percent = (float(field) for field in row.strip().split(', '))
                parsed[int(index)] = percent
            return {'percent': parsed}
        except Exception as error:
            raise RuntimeError(f'Failed to parse output ({cls._cmd}): {error}') from error
class NvidiaMemory(NvidiaMetric):
    """Parse nvidia-smi for memory usage."""

    _cmd: str = 'nvidia-smi --format=csv,noheader,nounits --query-gpu=index,memory.used,memory.total -c1'

    @classmethod
    def parse_text(cls, block: str) -> Dict[str, Dict[int, float]]:
        """Parse `nvidia-smi` output into {'memory': {gpu_index: used/total}}."""
        try:
            parsed = {}
            # Headerless CSV: each row is "<index>, <used>, <total>".
            for row in block.strip().split('\n'):
                index, used, total = (float(field) for field in row.strip().split(', '))
                parsed[int(index)] = used / total
            return {'memory': parsed}
        except Exception as error:
            raise RuntimeError(f'Failed to parse output ({cls._cmd}): {error}') from error
class NvidiaTemperature(NvidiaMetric):
    """Parse nvidia-smi for GPU temperature (in degrees C)."""

    _cmd: str = 'nvidia-smi --format=csv,noheader,nounits --query-gpu=index,temperature.gpu -c1'

    @classmethod
    def parse_text(cls, block: str) -> Dict[str, Dict[int, float]]:
        """Parse `nvidia-smi` output into {'temp': {gpu_index: celsius}}."""
        try:
            parsed = {}
            # Headerless CSV: each row is "<index>, <temperature>".
            for row in block.strip().split('\n'):
                index, celsius = (float(field) for field in row.strip().split(', '))
                parsed[int(index)] = celsius
            return {'temp': parsed}
        except Exception as error:
            raise RuntimeError(f'Failed to parse output ({cls._cmd}): {error}') from error
class NvidiaPower(NvidiaMetric):
    """Parse nvidia-smi for GPU total power draw (in Watts +/- 5 Watts)."""

    _cmd: str = 'nvidia-smi --format=csv,noheader,nounits --query-gpu=index,power.draw -c1'

    @classmethod
    def parse_text(cls, block: str) -> Dict[str, Dict[int, float]]:
        """Parse `nvidia-smi` output into {'power': {gpu_index: watts}}."""
        try:
            parsed = {}
            # Headerless CSV: each row is "<index>, <power draw>".
            for row in block.strip().split('\n'):
                index, watts = (float(field) for field in row.strip().split(', '))
                parsed[int(index)] = watts
            return {'power': parsed}
        except Exception as error:
            raise RuntimeError(f'Failed to parse output ({cls._cmd}): {error}') from error
adjectives = [
"abandoned",
"able",
"absolute",
"academic",
"acceptable",
"acclaimed",
"accomplished",
"accurate",
"aching",
"acidic",
"acrobatic",
"active",
"actual",
"adept",
"admirable",
"admired",
"adolescent",
"adorable",
"adored",
"advanced",
"adventurous",
"affectionate",
"afraid",
"aged",
"aggravating",
"aggressive",
"agile",
"agitated",
"agonizing",
"agreeable",
"ajar",
"alarmed",
"alarming",
"alert",
"alienated",
"alive",
"all",
"altruistic",
"amazing",
"ambitious",
"ample",
"amused",
"amusing",
"anchored",
"ancient",
"angelic",
"angry",
"anguished",
"animated",
"annual",
"another",
"antique",
"anxious",
"any",
"apprehensive",
"appropriate",
"apt",
"arctic",
"arid",
"aromatic",
"artistic",
"ashamed",
"assured",
"astonishing",
"athletic",
"attached",
"attentive",
"attractive",
"austere",
"authentic",
"authorized",
"automatic",
"avaricious",
"average",
"aware",
"awesome",
"awful",
"awkward",
"babyish",
"back",
"bad",
"baggy",
"bare",
"barren",
"basic",
"beautiful",
"belated",
"beloved",
"beneficial",
"best",
"better",
"bewitched",
"big",
"biodegradable",
"bitter",
"black",
"bland",
"blank",
"blaring",
"bleak",
"blind",
"blissful",
"blond",
"blue",
"blushing",
"bogus",
"boiling",
"bold",
"bony",
"boring",
"bossy",
"both",
"bouncy",
"bountiful",
"bowed",
"brave",
"breakable",
"brief",
"bright",
"brilliant",
"brisk",
"broken",
"bronze",
"brown",
"bruised",
"bubbly",
"bulky",
"bumpy",
"buoyant",
"burdensome",
"burly",
"bustling",
"busy",
"buttery",
"buzzing",
"calculating",
"calm",
"candid",
"canine",
"capital",
"carefree",
"careful",
"careless",
"caring",
"cautious",
"cavernous",
"celebrated",
"charming",
"cheap",
"cheerful",
"cheery",
"chief",
"chilly",
"chubby",
"circular",
"classic",
"clean",
"clear",
"clever",
"close",
"closed",
"cloudy",
"clueless",
"clumsy",
"cluttered",
"coarse",
"cold",
"colorful",
"colorless",
"colossal",
"comfortable",
"common",
"compassionate",
"competent",
"complete",
"complex",
"complicated",
"composed",
"concerned",
"concrete",
"confused",
"conscious",
"considerate",
"constant",
"content",
"conventional",
"cooked",
"cool",
"cooperative",
"coordinated",
"corny",
"corrupt",
"costly",
"courageous",
"courteous",
"crafty",
"crazy",
"creamy",
"creative",
"creepy",
"criminal",
"crisp",
"critical",
"crooked",
"crowded",
"cruel",
"crushing",
"cuddly",
"cultivated",
"cultured",
"cumbersome",
"curly",
"curvy",
"cute",
"cylindrical",
"damaged",
"damp",
"dangerous",
"dapper",
"daring",
"dark",
"darling",
"dazzling",
"dead",
"deadly",
"deafening",
"dear",
"dearest",
"decent",
"decimal",
"decisive",
"deep",
"defenseless",
"defensive",
"defiant",
"deficient",
"definite",
"definitive",
"delayed",
"delectable",
"delicious",
"delightful",
"delirious",
"demanding",
"dense",
"dental",
"dependable",
"dependent",
"descriptive",
"deserted",
"detailed",
"determined",
"devoted",
"different",
"difficult",
"digital",
"diligent",
"dim",
"dimpled",
"dimwitted",
"direct",
"dirty",
"disastrous",
"discrete",
"disfigured",
"disguised",
"disgusting",
"dishonest",
"disloyal",
"dismal",
"distant",
"distinct",
"distorted",
"distracting",
"dizzy",
"dopey",
"doting",
"double",
"downright",
"drab",
"drafty",
"dramatic",
"dreary",
"droopy",
"dry",
"dual",
"dull",
"dutiful",
"each",
"eager",
"early",
"earnest",
"easy",
"ecstatic",
"edible",
"educated",
"elaborate",
"elastic",
"elated",
"elderly",
"electric",
"elegant",
"elementary",
"elliptical",
"embarrassed",
"embellished",
"eminent",
"emotional",
"empty",
"enchanted",
"enchanting",
"energetic",
"enlightened",
"enormous",
"enraged",
"entire",
"envious",
"equal",
"equatorial",
"essential",
"esteemed",
"ethical",
"euphoric",
"even",
"evergreen",
"everlasting",
"every",
"evil",
"exalted",
"excellent",
"excitable",
"excited",
"exciting",
"exemplary",
"exhausted",
"exotic",
"expensive",
"experienced",
"expert",
"extraneous",
"extroverted",
"fabulous",
"failing",
"faint",
"fair",
"faithful",
"fake",
"false",
"familiar",
"famous",
"fancy",
"fantastic",
"faraway",
"fast",
"fat",
"fatal",
"fatboy",
"fatherly",
"favorable",
"favorite",
"fearful",
"fearless",
"feisty",
"feline",
"female",
"feminine",
"few",
"fickle",
"filthy",
"fine",
"finished",
"firm",
"first",
"firsthand",
"fitting",
"fixed",
"flaky",
"flamboyant",
"flashy",
"flat",
"flawed",
"flawless",
"flickering",
"flimsy",
"flippant",
"flowery",
"fluffy",
"fluid",
"flustered",
"focused",
"fond",
"foolhardy",
"foolish",
"forceful",
"forked",
"formal",
"forsaken",
"forthright",
"fortunate",
"fragrant",
"frail",
"frank",
"frayed",
"free",
"french",
"frequent",
"fresh",
"friendly",
"frightened",
"frightening",
"frigid",
"frilly",
"frisky",
"frivolous",
"frizzy",
"front",
"frosty",
"frozen",
"frugal",
"fruitful",
"full",
"fumbling",
"functional",
"funny",
"fussy",
"fuzzy",
"gargantuan",
"gaseous",
"general",
"generous",
"gentle",
"genuine",
"giant",
"giddy",
"gifted",
"gigantic",
"giving",
"glamorous",
"glaring",
"glass",
"gleaming",
"gleeful",
"glistening",
"glittering",
"gloomy",
"glorious",
"glossy",
"glum",
"golden",
"good",
"gorgeous",
"graceful",
"gracious",
"grand",
"grandiose",
"granular",
"grateful",
"grave",
"gray",
"great",
"greedy",
"green",
"gregarious",
"grim",
"grimy",
"gripping",
"grizzled",
"gross",
"grotesque",
"grouchy",
"grounded",
"growing",
"growling",
"grown",
"grubby",
"gruesome",
"grumpy",
"guilty",
"gullible",
"gummy",
"hairy",
"half",
"handmade",
"handsome",
"handy",
"happy",
"hard",
"harmful",
"harmless",
"harmonious",
"harsh",
"hasty",
"hateful",
"haunting",
"healthy",
"heartfelt",
"hearty",
"heavenly",
"heavy",
"hefty",
"helpful",
"helpless",
"hidden",
"hideous",
"high",
"hilarious",
"hoarse",
"hollow",
"homely",
"honest",
"honorable",
"honored",
"hopeful",
"horrible",
"hospitable",
"hot",
"huge",
"humble",
"humiliating",
"humming",
"humongous",
"hungry",
"hurtful",
"husky",
"icky",
"icy",
"ideal",
"idealistic",
"identical",
"idiotic",
"idle",
"idolized",
"ignorant",
"illegal",
"illiterate",
"illustrious",
"imaginary",
"imaginative",
"immaculate",
"immaterial",
"immediate",
"immense",
"impartial",
"impassioned",
"impeccable",
"imperfect",
"imperturbable",
"impish",
"impolite",
"important",
"impossible",
"impractical",
"impressionable",
"impressive",
"improbable",
"impure",
"inborn",
"incomparable",
"incompatible",
"incomplete",
"inconsequential",
"incredible",
"indelible",
"indolent",
"inexperienced",
"infamous",
"infantile",
"infatuated",
"inferior",
"infinite",
"informal",
"innocent",
"insecure",
"insidious",
"insignificant",
"insistent",
"instructive",
"insubstantial",
"intelligent",
"intent",
"intentional",
"interesting",
"internal",
"international",
"intrepid",
"ironclad",
"irresponsible",
"irritating",
"itchy",
"jaded",
"jagged",
"jaunty",
"jealous",
"jittery",
"joint",
"jolly",
"jovial",
"joyful",
"joyous",
"jubilant",
"judicious",
"juicy",
"jumbo",
"jumpy",
"junior",
"juvenile",
"kaleidoscopic",
"keen",
"key",
"kind",
"kindhearted",
"kindly",
"klutzy",
"knobby",
"knotty",
"knowing",
"knowledgeable",
"known",
"kooky",
"kosher",
"lame",
"lanky",
"large",
"last",
"lasting",
"late",
"lavish",
"lawful",
"lazy",
"leading",
"leafy",
"lean",
"left",
"legal",
"legitimate",
"light",
"lighthearted",
"likable",
"likely",
"limited",
"limp",
"limping",
"linear",
"lined",
"liquid",
"little",
"live",
"lively",
"livid",
"loathsome",
"lone",
"lonely",
"long",
"loose",
"lopsided",
"lost",
"loud",
"lovable",
"lovely",
"loving",
"low",
"loyal",
"lucky",
"lumbering",
"luminous",
"lumpy",
"lustrous",
"luxurious",
"mad",
"magnificent",
"majestic",
"major",
"male",
"mammoth",
"married",
"marvelous",
"masculine",
"massive",
"mature",
"meager",
"mealy",
"mean",
"measly",
"meaty",
"medical",
"mediocre",
"medium",
"meek",
"mellow",
"melodic",
"memorable",
"menacing",
"merry",
"messy",
"metallic",
"mild",
"milky",
"mindless",
"miniature",
"minor",
"minty",
"miserable",
"miserly",
"misguided",
"misty",
"mixed",
"modern",
"modest",
"moist",
"monstrous",
"monthly",
"monumental",
"moral",
"mortified",
"motherly",
"motionless",
"mountainous",
"muddy",
"muffled",
"multicolored",
"mundane",
"murky",
"mushy",
"musty",
"muted",
"mysterious",
"naive",
"narrow",
"nasty",
"natural",
"naughty",
"nautical",
"near",
"neat",
"necessary",
"needy",
"negative",
"neglected",
"negligible",
"neighboring",
"nervous",
"new",
"next",
"nice",
"nifty",
"nimble",
"nippy",
"nocturnal",
"noisy",
"nonstop",
"normal",
"notable",
"noted",
"noteworthy",
"novel",
"noxious",
"numb",
"nutritious",
"nutty",
"obedient",
"obese",
"oblong",
"obvious",
"occasional",
"odd",
"oddball",
"offbeat",
"offensive",
"official",
"oily",
"old",
"only",
"open",
"optimal",
"optimistic",
"opulent",
"orange",
"orderly",
"ordinary",
"organic",
"original",
"ornate",
"ornery",
"other",
"our",
"outgoing",
"outlandish",
"outlying",
"outrageous",
"outstanding",
"oval",
"overcooked",
"overdue",
"overjoyed",
"overlooked",
"palatable",
"pale",
"paltry",
"parallel",
"parched",
"partial",
"passionate",
"past",
"pastel",
"peaceful",
"peppery",
"perfect",
"perfumed",
"periodic",
"perky",
"personal",
"pertinent",
"pesky",
"pessimistic",
"petty",
"phony",
"physical",
"piercing",
"pink",
"pitiful",
"plain",
"plaintive",
"plastic",
"playful",
"pleasant",
"pleased",
"pleasing",
"plucky",
"plump",
"plush",
"pointed",
"pointless",
"poised",
"polished",
"polite",
"political",
"poor",
"popular",
"portly",
"posh",
"positive",
"possible",
"potable",
"powerful",
"powerless",
"practical",
"precious",
"present",
"prestigious",
"pretty",
"previous",
"pricey",
"prickly",
"primary",
"prime",
"pristine",
"private",
"prize",
"probable",
"productive",
"profitable",
"profuse",
"proper",
"proud",
"prudent",
"punctual",
"pungent",
"puny",
"pure",
"purple",
"pushy",
"putrid",
"puzzled",
"puzzling",
"quaint",
"qualified",
"quarrelsome",
"quarterly",
"queasy",
"querulous",
"questionable",
"quick",
"quiet",
"quintessential",
"quirky",
"quixotic",
"quizzical",
"radiant",
"ragged",
"rapid",
"rare",
"rash",
"raw",
"ready",
"real",
"realistic",
"reasonable",
"recent",
"reckless",
"rectangular",
"red",
"reflecting",
"regal",
"regular",
"reliable",
"relieved",
"remarkable",
"remorseful",
"remote",
"repentant",
"repulsive",
"required",
"respectful",
"responsible",
"revolving",
"rewarding",
"rich",
"right",
"rigid",
"ringed",
"ripe",
"roasted",
"robust",
"rosy",
"rotating",
"rotten",
"rough",
"round",
"rowdy",
"royal",
"rubbery",
"ruddy",
"rude",
"rundown",
"runny",
"rural",
"rusty",
"sad",
"safe",
"salty",
"same",
"sandy",
"sane",
"sarcastic",
"sardonic",
"satisfied",
"scaly",
"scarce",
"scared",
"scary",
"scented",
"scholarly",
"scientific",
"scornful",
"scratchy",
"scrawny",
"second",
"secondary",
"secret",
"selfish",
"sentimental",
"separate",
"serene",
"serious",
"serpentine",
"several",
"severe",
"shabby",
"shadowy",
"shady",
"shallow",
"shameful",
"shameless",
"sharp",
"shimmering",
"shiny",
"shocked",
"shocking",
"shoddy",
"short",
"showy",
"shrill",
"shy",
"silent",
"silky",
"silly",
"silver",
"similar",
"simple",
"simplistic",
"sinful",
"single",
"sizzling",
"skeletal",
"skinny",
"sleepy",
"slight",
"slim",
"slimy",
"slippery",
"slow",
"slushy",
"small",
"smart",
"smoggy",
"smooth",
"smug",
"snappy",
"snarling",
"sneaky",
"sniveling",
"snoopy",
"sociable",
"soft",
"soggy",
"solid",
"somber",
"some",
"sophisticated",
"sore",
"sorrowful",
"soulful",
"soupy",
"sour",
"spanish",
"sparkling",
"sparse",
"specific",
"spectacular",
"speedy",
"spherical",
"spicy",
"spiffy",
"spirited",
"spiteful",
"splendid",
"spotless",
"spotted",
"spry",
"square",
"squeaky",
"squiggly",
"stable",
"staid",
"stained",
"stale",
"standard",
"starchy",
"stark",
"starry",
"steel",
"steep",
"sticky",
"stiff",
"stimulating",
"stingy",
"stormy",
"straight",
"strange",
"strict",
"strident",
"striking",
"striped",
"strong",
"studious",
"stunning",
"stupendous",
"stupid",
"sturdy",
"stylish",
"subdued",
"submissive",
"substantial",
"subtle",
"suburban",
"sudden",
"sugary",
"sunny",
"super",
"superb",
"superficial",
"superior",
"supportive",
"surprised",
"suspicious",
"svelte",
"sweaty",
"sweet",
"sweltering",
"swift",
"sympathetic",
"taboo",
"tacky",
"talkative",
"tall",
"tame",
"tan",
"tangible",
"tart",
"tasteful",
"tasty",
"tattered",
"tatty",
"taut",
"tawdry",
"tedious",
"teeming",
"teensy",
"tempting",
"tender",
"tenderised",
"tense",
"tepid",
"terrible",
"terrific",
"testy",
"thankful",
"that",
"these",
"thick",
"thin",
"third",
"thirsty",
"this",
"thorny",
"thorough",
"those",
"thoughtful",
"threadbare",
"thrifty",
"thunderous",
"tidy",
"tight",
"timely",
"tinted",
"tiny",
"tired",
"torn",
"total",
"tough",
"tragic",
"trained",
"traumatic",
"treasured",
"tremendous",
"triangular",
"tricky",
"trifling",
"trim",
"trivial",
"troubled",
"true",
"trusting",
"trustworthy",
"trusty",
"truthful",
"tubby",
"turbulent",
"twin",
"ugly",
"ultimate",
"unacceptable",
"unaware",
"uncomfortable",
"uncommon",
"unconscious",
"understated",
"unequaled",
"uneven",
"unfinished",
"unfit",
"unfolded",
"unfortunate",
"unhappy",
"unhealthy",
"uniform",
"unimportant",
"unique",
"united",
"unkempt",
"unknown",
"unlawful",
"unlined",
"unlucky",
"unnatural",
"unpleasant",
"unrealistic",
"unripe",
"unruly",
"unselfish",
"unsightly",
"unsteady",
"unsung",
"untidy",
"untimely",
"untried",
"untrue",
"unused",
"unusual",
"unwelcome",
"unwieldy",
"unwilling",
"unwitting",
"unwritten",
"upbeat",
"upright",
"upset",
"urban",
"usable",
"used",
"useful",
"useless",
"utilized",
"utter",
"vacant",
"vague",
"vain",
"valid",
"valuable",
"vapid",
"variable",
"vast",
"velvety",
"venerated",
"vengeful",
"verifiable",
"vibrant",
"vicious",
"victorious",
"vigilant",
"vigorous",
"villainous",
"violent",
"violet",
"virtual",
"virtuous",
"visible",
"vital",
"vivacious",
"vivid",
"voluminous",
"wan",
"warlike",
"warm",
"warmhearted",
"warped",
"wary",
"wasteful",
"watchful",
"waterlogged",
"watery",
"wavy",
"weak",
"wealthy",
"weary",
"webbed",
"wee",
"weekly",
"weepy",
"weighty",
"weird",
"welcome",
"wet",
"which",
"whimsical",
"whirlwind",
"whispered",
"white",
"whole",
"whopping",
"wicked",
"wide",
"wiggly",
"wild",
"willing",
"wilted",
"winding",
"windy",
"winged",
"wiry",
"wise",
"witty",
"wobbly",
"woeful",
"wonderful",
"wooden",
"woozy",
"wordy",
"worldly",
"worn",
"worried",
"worrisome",
"worse",
"worst",
"worthless",
"worthwhile",
"worthy",
"wrathful",
"wretched",
"writhing",
"wrong",
"wry",
"yawning",
"yearly",
"yellow",
"yellowish",
"young",
"youthful",
"yummy",
"zany",
"zealous",
"zesty",
"zigzag"
]
animals = [
"aardvark",
"albatross",
"alligator",
"alpaca",
"ant",
"anteater",
"antelope",
"ape",
"armadillo",
"donkey",
"baboon",
"badger",
"barracuda",
"bat",
"bear",
"beaver",
"bee",
"bison",
"boar",
"buffalo",
"butterfly",
"camel",
"capybara",
"caribou",
"cassowary",
"cat",
"caterpillar",
"cattle",
"chamois",
"cheetah",
"chicken",
"chimpanzee",
"chinchilla",
"chough",
"clam",
"cobra",
"cockroach",
"cod",
"cormorant",
"coyote",
"crab",
"crane",
"crocodile",
"crow",
"curlew",
"cyclops",
"deer",
"dinosaur",
"dog",
"dogfish",
"dolphin",
"dotterel",
"dove",
"dragon",
"dragonfly",
"duck",
"dugong",
"dunlin",
"eagle",
"echidna",
"eel",
"eland",
"elephant",
"elk",
"emu",
"fairy",
"falcon",
"ferret",
"finch",
"fish",
"flamingo",
"fly",
"fox",
"frog",
"gazelle",
"gerbil",
"giraffe",
"gnat",
"gnu",
"goat",
"goldfinch",
"goldfish",
"goose",
"gorilla",
"goshawk",
"grasshopper",
"gremlin",
"grouse",
"guanaco",
"gull",
"hamster",
"hare",
"hawk",
"hedgehog",
"heron",
"herring",
"hippopotamus",
"hornet",
"horse",
"human",
"hummingbird",
"hyena",
"ibex",
"ibis",
"jackal",
"jaguar",
"jay",
"jellyfish",
"kangaroo",
"kingfisher",
"koala",
"kookabura",
"kudu",
"lapwing",
"lark",
"lemur",
"leopard",
"lion",
"llama",
"lobster",
"locust",
"loris",
"louse",
"lyrebird",
"magpie",
"mallard",
"manatee",
"mandrill",
"mantis",
"mermaid",
"marten",
"meerkat",
"mink",
"mole",
"mongoose",
"monkey",
"moose",
"mosquito",
"mouse",
"mule",
"narwhal",
"newt",
"nightingale",
"octopus",
"okapi",
"opossum",
"oryx",
"ostrich",
"otter",
"owl",
"oyster",
"panther",
"parrot",
"partridge",
"peafowl",
"pelican",
"penguin",
"pheasant",
"phoenix",
"pig",
"pigeon",
"pony",
"porcupine",
"porpoise",
"quail",
"quetzal",
"rabbit",
"raccoon",
"rail",
"ram",
"rat",
"raven",
"reindeer",
"rhinoceros",
"rook",
"salamander",
"salmon",
"sandpiper",
"sardine",
"scorpion",
"seahorse",
"seal",
"shark",
"sheep",
"shrew",
"skunk",
"snail",
"snake",
"sparrow",
"spider",
"spoonbill",
"squid",
"squirrel",
"starling",
"stingray",
"stinkbug",
"stork",
"swallow",
"swan",
"tapir",
"tarsier",
"termite",
"tiger",
"toad",
"trout",
"turkey",
"turtle",
"urchin",
"unicorn",
"vampire",
"viper",
"vulture",
"wallaby",
"walrus",
"warewolf",
"wasp",
"weasel",
"whale",
"wildcat",
"wolf",
"wolverine",
"wombat",
"woodcock",
"woodpecker",
"worm",
"wren",
"yak",
"zebra",
"zombie",
]
plants = [
"alder",
"almond",
"ambrosia",
"apple",
"apricot",
"arfaj",
"ash",
"azolla",
"bamboo",
"banana",
"baobab",
"bay",
"bean",
"bearberry",
"beech",
"bindweed",
"birch",
"bittercress",
"bittersweet",
"blackberry",
"blackhaw",
"blueberry",
"boxelder",
"boxwood",
"brier",
"broadleaf",
"buckeye",
"cabbage",
"carrot",
"cherry",
"chestnut",
"chrysanthemum",
"clove",
"clover",
"coakum",
"coconut",
"collard",
"colwort",
"coneflower",
"cornel",
"corydalis",
"cress",
"crowfoot",
"cucumber",
"daisy",
"deadnettle",
"dewberry",
"dindle",
"dogwood",
"drumstick",
"duscle",
"eucalyptus",
"eytelia",
"fellenwort",
"felonwood",
"felonwort",
"fennel",
"ferns",
"feverfew",
"fig",
"flax",
"fluxroot",
"fumewort",
"gallberry",
"garget",
"garlic",
"goldenglow",
"gordaldo",
"grapefruit",
"grapevine",
"gutweed",
"haldi",
"harlequin",
"hellebore",
"hemp",
"hogweed",
"holly",
"houseleek",
"huckleberry",
"inkberry",
"ivy",
"juneberry",
"juniper",
"keek",
"kinnikinnik",
"kousa",
"kudzu",
"lavender",
"leek",
"lemon",
"lettuce",
"lilac",
"maize",
"mango",
"maple",
"marina",
"mesquite",
"milfoil",
"milkweed",
"moosewood",
"morel",
"mulberry",
"neem",
"nettle",
"nightshade",
"nosebleed",
"olive",
"onion",
"orange",
"osage",
"parsley",
"parsnip",
"pea",
"peach",
"peanut",
"pear",
"pellitory",
"pine",
"pineapple",
"pistachio",
"plantain",
"pokeroot",
"pokeweed",
"polkweed",
"poplar",
"poppy",
"possumhaw",
"potato",
"pudina",
"quercitron",
"ragweed",
"ragwort",
"rantipole",
"raspberry",
"redbud",
"rhubarb",
"ribwort",
"rice",
"rocket",
"rose",
"rosemary",
"rye",
"sanguinary",
"saskatoon",
"scoke",
"serviceberry",
"shadbush",
"silkweed",
"sneezeweed",
"sneezewort",
"snowdrop",
"sorrel",
"speedwell",
"stammerwort",
"stickweed",
"strawberry",
"sugarcane",
"sugarplum",
"sunflower",
"swinies",
"sycamore",
"tansy",
"tea",
"thimbleberry",
"thimbleweed",
"thistle",
"thyme",
"tomato",
"toothwort",
"trillium",
"tulip",
"tulsi",
"viburnum",
"walnut",
"wheat",
"willow",
"wineberry",
"winterberry",
"woodbine",
"wormwood",
"yarrow",
"zedoary",
]
import typing
from contextlib import contextmanager
from queue import Queue, Empty, Full
from typing import Generator, Generic, Optional, List
from threading import Condition
__all__ = ["PoolError", "PoolTimeout", "PoolFull", "Pool", "LazyPool", "__version__"]
__version__ = "0.2.0"
ResourceT = typing.TypeVar("ResourceT")
ResourceFactory = typing.Callable[[], ResourceT]


class PoolError(Exception):
    """Root of the pool exception hierarchy."""


class PoolTimeout(PoolError):
    """The wait for an available resource expired."""


class PoolFull(PoolError):
    """A resource was returned to a pool that is already at capacity."""


class Pool(Generic[ResourceT]):
    """A fixed-size pool of pre-created resources.

    All `pool_size` resources are built eagerly via `factory` at
    construction time; `get`/`put` (or the `reserve` context manager)
    check resources out and back in.

    Parameters:
        factory: Zero-argument callable that builds one resource.
        pool_size: Maximum (and initial) number of pooled resources.
    """

    _pool: Queue
    _pool_size: int

    def __init__(self, factory: ResourceFactory, *, pool_size: int) -> None:
        self._pool = Queue(pool_size)
        self._pool_size = pool_size
        # Fill the pool up front so every resource exists before first use.
        for _ in range(pool_size):
            self.put(factory())

    @contextmanager
    def reserve(self, timeout: Optional[float] = None) -> Generator[ResourceT, None, None]:
        """Check a resource out for the duration of a `with` block.

        The resource is returned to the pool even if the block raises.

        Parameters:
            timeout: Seconds to wait for a free resource; None waits forever.

        Raises:
            PoolTimeout: If `timeout` elapses before a resource frees up.
        """
        res = self.get(timeout=timeout)
        try:
            yield res
        finally:
            self.put(res)

    def get(self, *, timeout: Optional[float] = None) -> ResourceT:
        """Check a resource out of the pool.

        The caller owns the resource and must `put` it back when done.

        Parameters:
            timeout: Seconds to wait for a free resource; None waits forever.

        Raises:
            PoolTimeout: If `timeout` elapses before a resource frees up.
        """
        try:
            return self._pool.get(timeout=timeout)
        except Empty:
            raise PoolTimeout()

    def put(self, resource: ResourceT) -> None:
        """Return a checked-out resource to the pool.

        Raises:
            PoolFull: If the pool already holds `pool_size` resources.
        """
        try:
            return self._pool.put_nowait(resource)
        except Full:
            raise PoolFull()

    def __len__(self) -> int:
        """Number of resources currently available (not checked out)."""
        return self._pool.qsize()
class LazyPool(Generic[ResourceT]):
    """A generic resource pool that lazily creates resources.

    Resources are only built (via ``factory``) when ``get`` finds the pool
    empty and fewer than ``pool_size`` resources are currently in circulation.

    Parameters:
        factory: The factory function that is used to create resources.
        pool_size: The max number of resources in the pool at any time.
        min_instances: Number of resources to create eagerly up front.
    """

    _factory: ResourceFactory   # builds one resource on demand
    _cond: Condition            # guards _pool and _used_size
    _pool: List[ResourceT]      # idle resources, used as a LIFO stack
    _pool_size: int             # hard cap on resources in circulation
    _used_size: int             # resources created so far (idle + checked out)

    def __init__(self, factory: ResourceFactory, *, pool_size: int, min_instances: int = 0) -> None:
        assert pool_size > min_instances, "pool_size must be larger than min_instances"
        self._factory = factory
        self._cond = Condition()
        self._pool = []
        self._pool_size = pool_size
        self._used_size = 0
        # Pre-create the minimum number of instances; each counts against
        # _used_size before being parked in the idle pool.
        for _ in range(min_instances):
            self._used_size += 1
            self.put(factory())

    @contextmanager
    def reserve(self, timeout: Optional[float] = None) -> Generator[ResourceT, None, None]:
        """Reserve a resource and then put it back.

        Example:
            with pool.reserve(timeout=10) as res:
                print(res)

        Raises:
            Timeout: If a timeout is given and it expires.

        Parameters:
            timeout: An optional timeout representing how long to wait
                for the resource.

        Returns:
            A resource.
        """
        resource = self.get(timeout=timeout)
        try:
            yield resource
        finally:
            # Always return the resource, even if the with-block raised.
            self.put(resource)

    def get(self, *, timeout: Optional[float] = None) -> ResourceT:
        """Get a resource from the pool.

        It's the getter's responsibility to put the resource back once
        they're done using it.

        Raises:
            Timeout: If a timeout is given and it expires.

        Parameters:
            timeout: An optional timeout representing how long to wait
                for the resource.
        """
        with self._cond:
            # Wait until an idle resource exists, or create one lazily if we
            # are still under the pool_size cap.
            while not self._pool:
                if self._used_size != self._pool_size:
                    self._used_size += 1
                    return self._factory()
                # Condition.wait returns False only when the timeout expired.
                if not self._cond.wait(timeout):
                    raise PoolTimeout()
            return self._pool.pop()

    def put(self, resource: ResourceT) -> None:
        """Put a resource back.

        Raises:
            PoolFull: If the resource pool is full.
        """
        with self._cond:
            if len(self._pool) == self._pool_size:
                raise PoolFull()
            self._pool.append(resource)
            # Wake one waiter in get().
            self._cond.notify()

    def discard(self, resource: ResourceT) -> None:
        """Discard a resource from the pool.

        Frees up one slot so a later ``get`` may lazily create a replacement.

        NOTE(review): the ``resource`` argument is not removed from the idle
        pool here; this appears intended for resources that are currently
        checked out -- confirm with callers.
        """
        with self._cond:
            self._used_size = max(0, self._used_size - 1)
            self._cond.notify()

    def __len__(self) -> int:
        """Get the number of idle resources currently in the pool."""
        return len(self._pool)
from dataclasses import dataclass, field
from typing import Any, Dict, List
from colorama import Fore, init
from rpdk.guard_rail.utils.miscellaneous import jinja_loader
# Initialise colorama so the ANSI colour codes below work on all platforms.
init()
# Coloured prefixes used when rendering compliance results.
FAILED_HEADER = f"{Fore.RED}[FAILED]:{Fore.RESET}"
WARNING_HEADER = f"{Fore.YELLOW}[WARNING]:{Fore.RESET}"
PASSED_HEADER = f"{Fore.GREEN}[PASSED]:{Fore.RESET}"
@dataclass
class Stateless:
    """Implements Stateless type for stateless compliance assessment
    over specified list of schemas/rules

    Args:
        schemas (List[Dict[str, Any]]): Collection of Resource Provider Schemas
        rules (List[str]): Collection of Custom Compliance Rules
    """

    # Schemas to assess; each entry is one deserialized resource provider schema.
    schemas: List[Dict[str, Any]]
    # Optional custom compliance rules (raw rule text).
    rules: List[str] = field(default_factory=list)
@dataclass
class Stateful:
    """Implements Stateful type for stateful compliance assessment
    over specified list of rules

    Args:
        current_schema (Dict[str, Any]): Current State of Resource Provider Schema
        previous_schema (Dict[str, Any]): Previous State of Resource Provider Schema
        rules (List[str]): Collection of Custom Compliance Rules
    """

    current_schema: Dict[str, Any]
    previous_schema: Dict[str, Any]
    # Optional custom compliance rules (raw rule text).
    rules: List[str] = field(default_factory=list)
@dataclass
class GuardRuleResult:
    """Outcome of a single guard rule check: the check id and its message."""

    # "unidentified" is the fallback when the guard output lacks the field.
    check_id: str = field(default="unidentified")
    message: str = field(default="unidentified")
@dataclass
class GuardRuleSetResult:
    """Represents a result of the compliance run.

    Contains passed, failed, skipped and warning rules

    Attributes:
        compliant: rules, that schema(s) passed
        non_compliant: rules, that schema(s) failed
        warning: rules, that schema(s) failed but it's not a hard requirement
        skipped: rules, that are not applicable to the schema(s)
    """

    compliant: List[str] = field(default_factory=list)
    non_compliant: Dict[str, List[GuardRuleResult]] = field(default_factory=dict)
    warning: Dict[str, List[GuardRuleResult]] = field(default_factory=dict)
    skipped: List[str] = field(default_factory=list)

    def merge(self, guard_ruleset_result: Any):
        """Merges the result into a nice mutual set.

        Args:
            guard_ruleset_result (Any): result in a raw form

        Raises:
            TypeError: if the argument is not a GuardRuleSetResult
        """
        if not isinstance(guard_ruleset_result, GuardRuleSetResult):
            raise TypeError("cannot merge with non GuardRuleSetResult type")
        self.compliant.extend(guard_ruleset_result.compliant)
        self.skipped.extend(guard_ruleset_result.skipped)
        # NOTE(review): dict-spread means entries from the incoming result
        # overwrite same-named rule keys instead of combining their lists --
        # confirm that merged results never share rule names.
        self.non_compliant = {
            **self.non_compliant,
            **guard_ruleset_result.non_compliant,
        }
        self.warning = {
            **self.warning,
            **guard_ruleset_result.warning,
        }

    def __str__(self):
        # Nothing recorded at all -> nothing to render.
        if (
            not self.compliant
            and not self.non_compliant
            and not self.skipped
            and not self.warning
        ):
            return "Couldn't retrieve the result"
        # Render through the packaged Jinja template.
        environment = jinja_loader(__name__)
        template = environment.get_template("guard-result-pojo.output")
        return template.render(
            skipped_rules=self.skipped,
            passed_rules=self.compliant,
            failed_rules=self.non_compliant,
            warning_rules=self.warning,
            failed_header=FAILED_HEADER,
            warning_header=WARNING_HEADER,
            passed_header=PASSED_HEADER,
        )
import argparse
import json
import re
from functools import wraps
from typing import Sequence
from .common import (
FILE_PATTERN,
GUARD_FILE_PATTERN,
GUARD_PATH_EXTRACT_PATTERN,
JSON_PATH_EXTRACT_PATTERN,
SCHEMA_FILE_PATTERN,
read_file,
)
from .logger import LOG, logdebug
def apply_rule(execute_rule, msg, /):
    """Build a decorator that asserts a precondition before calling the target.

    The decorated function must take exactly one positional argument; the
    predicate is evaluated on that argument and an ``AssertionError`` with
    ``msg`` is raised when it fails.

    Parameters:
        execute_rule: Predicate evaluated on the wrapped function's argument.
        msg: Assertion message used when the predicate fails.
    """
    def decorate(target: object):
        @wraps(target)
        def guarded(argument):
            # Validate first; delegate only when the rule holds.
            assert execute_rule(argument), msg
            return target(argument)
        return guarded
    return decorate
@apply_rule(
    lambda args: len(args.schemas) == 2 if args.stateful else True,
    "If Stateful mode is executed, then two schemas MUST be provided (current/previous)",
)
def argument_validation(
    args: argparse.Namespace,
):  # pylint: disable=unused-argument,C0116
    """No-op body: validation is performed by the @apply_rule decorator,
    which raises AssertionError when the stateful precondition fails."""
    pass
@logdebug
def setup_args():
    """Build the CLI argument parser for the guard-rail tool.

    Returns:
        argparse.ArgumentParser: parser accepting --schema, --stateful,
        --format and --rules.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--version", action="version", version="v0.1alpha")
    parser.add_argument(
        "--schema",
        dest="schemas",
        action="extend",
        nargs="+",
        type=str,
        required=True,
        help="Should specify schema for CFN compliance evaluation (path or plain value)",
    )
    parser.add_argument(
        "--stateful",
        dest="stateful",
        action="store_true",
        default=False,
        help="If specified will execute stateful compliance evaluation",
    )
    parser.add_argument(
        "--format",
        dest="format",
        action="store_true",
        default=False,
        # Fixed: help text was copy-pasted from --schema and described the wrong flag.
        help="If specified will format the compliance evaluation output",
    )
    parser.add_argument(
        "--rules",
        dest="rules",
        action="extend",
        nargs="+",
        type=str,
        help="Should specify additional rules for compliance evaluation (path of `.guard` file)",
    )
    return parser
@logdebug
@apply_rule(
    lambda input_path: re.search(FILE_PATTERN, input_path),
    "file path must be specified with `file://...`",
)
@apply_rule(
    lambda input_path: re.search(SCHEMA_FILE_PATTERN, input_path),
    "not a valid json file `...(.json)`",
)
def schema_input_path_validation(input_path: str):  # pylint: disable=C0116
    """No-op body: the stacked @apply_rule decorators assert that the path
    is a ``file://`` URI pointing at a ``.json`` file."""
    pass
@logdebug
@apply_rule(
    lambda input_path: re.search(FILE_PATTERN, input_path),
    "file path must be specified with `file://...`",
)
def rule_input_path_validation(input_path: str):  # pylint: disable=C0116
    """No-op body: the @apply_rule decorator asserts the path is a
    ``file://`` URI. The `.guard` extension is checked by the caller."""
    pass
@logdebug
def collect_schemas(schemas: Sequence[str] = None):
    """Collecting schemas.

    Each item is first treated as an inline JSON document; when it cannot be
    deserialized directly, it is treated as a ``file://`` path and read from
    disk instead.

    Args:
        schemas (Sequence[str], optional): list of schemas (inline JSON or
            ``file://`` paths)

    Raises:
        ValueError: if an item is neither valid JSON nor a resolvable
            schema file path.

    Returns:
        List: list of deserialized schemas
    """
    _schemas = []

    @logdebug
    def __to_json(schema_raw: str):
        try:
            return json.loads(schema_raw)
        except json.JSONDecodeError as ex:
            raise ValueError(
                f"Could not deserialize schema directly - invalid Schema Body {ex}. Trying access it as a file"
            ) from ex

    if schemas:
        for schema_item in schemas:
            LOG.info(schema_item)
            schema_deser = None
            try:
                schema_deser = __to_json(schema_item)
            except ValueError as e:
                LOG.info(e)
            if schema_deser is None:
                schema_input_path_validation(schema_item)
                # Fixed: guard against a non-matching path instead of crashing
                # with AttributeError on `.group(0)` when the regex finds nothing.
                match = re.search(JSON_PATH_EXTRACT_PATTERN, schema_item)
                if match is None:
                    raise ValueError(f"Could not extract a schema file path from `{schema_item}`")
                path = "/" + match.group(0)
                file_obj = read_file(path)
                schema_deser = __to_json(file_obj)
            _schemas.append(schema_deser)
    return _schemas
@logdebug
def collect_rules(rules: Sequence[str] = None):
    """Collecting rules.

    Args:
        rules (Sequence[str], optional): list of rule file paths
            (``file://...*.guard``)

    Raises:
        ValueError: if a path is not a resolvable ``.guard`` file.

    Returns:
        List: list of rule file contents
    """
    _rules = []
    if rules:
        for rule in rules:
            rule_input_path_validation(rule)
            if re.search(GUARD_FILE_PATTERN, rule):
                # Fixed: guard against a non-matching path instead of crashing
                # with AttributeError on `.group(0)` when the regex finds nothing.
                match = re.search(GUARD_PATH_EXTRACT_PATTERN, rule)
                if match is None:
                    raise ValueError(f"Could not extract a guard file path from `{rule}`")
                path = "/" + match.group(0)
                file_obj = read_file(path)
                _rules.append(file_obj)
            else:
                # Fixed typo in the error message ("extenstion" -> "extension").
                raise ValueError("file extension is invalid - MUST be `.guard`")
    return _rules
import inspect
from collections.abc import Mapping, Sequence
from operator import attrgetter
from .utils import _nested_update
class Translator:
    """A translator superclass.

    Subclasses may declare ``constants`` (copied verbatim), ``mapping``
    (declarative attribute lookups) and methods decorated with ``@attr``
    (computed attributes); all three are combined into ``repr``.

    Attributes
    ----------
    resource
        The resource to translate.
    from_map : bool
        If ``True``, `resource` attributes will be indexed by key.
    repr : dict
        The translated resource.
    """

    def __init__(self, resource, from_map=False, **kwargs):
        """
        Parameters
        ----------
        resource
            The resource to translate. By default, attributes are accessed via dot
            notation (see `from_map` below).
        from_map : bool, optional
            If ``True``, `resource` attributes will be indexed by key.
        **kwargs
            Key-value pairs to be set on the translated resource. To set nested
            attributes, pass a mapping (the key will remain the top-level attribute).
            The final key-value pair overwrites.
        """
        self.resource = resource
        self.from_map = from_map
        # Start from class-level constants, if the subclass defines any.
        self.repr = self.constants.copy() if hasattr(self, "constants") else {}
        if hasattr(self, "mapping"):
            self.repr.update(self._add_mapping(self.mapping))
        # Collect every method carrying the `_attr` marker set by the @attr
        # decorator, and add its (non-None) return value to the translation.
        for attr, meth in filter(
            lambda attr_meth: hasattr(attr_meth[1], "_attr"),
            inspect.getmembers(self, inspect.ismethod),
        ):
            val = meth()
            if val is not None:
                if isinstance(meth._attr, tuple) and meth._attr:
                    # @attr was given keys: nest the value under them, creating
                    # intermediate dicts as needed.
                    nest = self.repr.setdefault(meth._attr[0], {})
                    for key in meth._attr[1:]:
                        nest = nest.setdefault(key, {})
                    nest[attr] = val
                else:
                    self.repr[attr] = val
        # Explicit kwargs overwrite anything computed above (deep merge).
        _nested_update(self.repr, kwargs)

    def _add_mapping(self, mapping):
        """Resolve a (possibly nested) mapping of output keys to resource lookups.

        Strings are dotted attribute paths (or single keys when ``from_map``);
        sequences are chains of nested keys (``from_map`` only); mappings
        recurse. Lookups that fail are silently omitted.
        """
        repr = {}
        for attr, val in mapping.items():
            if isinstance(val, str):
                try:
                    pot_val = (
                        self.resource[val]
                        if self.from_map
                        else attrgetter(val)(self.resource)
                    )
                except (KeyError, AttributeError):
                    continue
            elif isinstance(val, Sequence):
                if not self.from_map:
                    raise TypeError(
                        "Sequences are used to fetch nested keys when `from_map` is `True`."
                    )
                elif not val:
                    continue
                # Walk the nested keys; a missing key yields None (omitted below).
                pot_val = self.resource
                for key in val:
                    try:
                        pot_val = pot_val[key]
                    except KeyError:
                        pot_val = None
                        break
            elif isinstance(val, Mapping):
                pot_val = self._add_mapping(val) or None
            # NOTE(review): a value that is none of str/Sequence/Mapping leaves
            # `pot_val` unbound from this iteration -- confirm mapping values
            # are always one of the three supported kinds.
            if pot_val is not None:
                repr[attr] = pot_val
        return repr
def attr(*f_or_keys):
    """A translator method decorator for dynamic attributes.

    The decorated function's name becomes the attribute's key. To create
    nested attributes, pass each higher-level key in order. When used bare
    (``@attr`` with no parentheses), the function itself is received here
    and marked directly.

    Parameters
    ----------
    *f_or_keys : callable or str
        If ``str``, ordered keys to create nested attributes.
    """
    # Bare usage: @attr applied straight to the function.
    if f_or_keys:
        first = f_or_keys[0]
        if callable(first):
            first._attr = True
            return first

    def mark(func):
        # Empty-keys call (@attr()) degrades to a plain True marker.
        func._attr = f_or_keys or True
        return func

    return mark
class AbortTranslation(Exception):
    """Signal used to abort a translation in progress.

    NOTE(review): not raised within this module; presumably raised by
    Translator subclasses or their callers -- confirm usage.
    """
    pass
from typing import Any, Dict, List, Optional

from respect_validation.Exceptions import NestedValidationException
class FormValidator(object):
    """Validates a request mapping field-by-field against a mapping of rules.

    Each rule object must expose ``get_name``/``set_name`` and ``claim``
    (raising ``NestedValidationException`` on failure).
    """

    # Per-instance error storage. Previously these were shared mutable
    # class-level defaults; they are now initialised per instance.
    _errors: List[Any]
    _error_messages: Dict[str, Any]

    def __init__(self) -> None:
        self._errors = []
        self._error_messages = {}

    def validate(self, request: Dict[str, Any], rules: Dict[str, Any], check_missed: bool = False,
                 check_unknown: bool = True, templates: Optional[Dict[str, str]] = None) -> 'FormValidator':
        """Run every rule against its field and record failures.

        Args:
            request: field name -> submitted value.
            rules: field name -> rule object.
            check_missed: when True, fields absent from ``request`` fail.
            check_unknown: when True, fields not covered by ``rules`` fail
                under the special ``_unknown_`` key.
            templates: optional message templates forwarded to the rules'
                exceptions. (Previously a mutable ``{}`` default argument.)

        Returns:
            self, so callers can chain ``failed()``/``get_messages()``.
        """
        templates = {} if templates is None else templates
        self._errors = []
        self._error_messages = {}
        received_fields = list(request.keys())
        if check_unknown:
            self._error_messages["_unknown_"] = None
        for field, rule in rules.items():
            self._error_messages[field] = None
            if check_missed and field not in received_fields:
                message = "Item {} must be present".format(field)
                self._errors.append({field: [message]})
                self._error_messages[field] = [message]
                continue
            item = request.get(field, None)
            try:
                if rule.get_name() is None:
                    # Default the rule's display name to the capitalised field name.
                    rule.set_name(field[0].upper() + field[1:])
                rule.claim(item)
            except NestedValidationException as nve:
                # Compute the messages once and reuse (was computed twice before).
                messages = nve.get_messages(templates)
                self._errors.append(messages)
                self._error_messages[field] = messages
            if field in received_fields:
                received_fields.remove(field)
        if check_unknown and len(received_fields):
            # Everything left over was submitted but has no matching rule.
            self._error_messages["_unknown_"] = []
            for f in received_fields:
                self._error_messages["_unknown_"].append("Unknown field {}".format(f))
        if self._error_messages.get("_unknown_"):
            self._errors.append({"_unknown_": self._error_messages.get("_unknown_")})
        return self

    def failed(self) -> bool:
        """True when the last validate() call recorded at least one error."""
        return len(self._errors) > 0

    def get_errors(self):
        """List of error payloads in the order they were recorded."""
        return self._errors

    def get_messages(self) -> Dict[str, Any]:
        """Mapping of field name -> error messages (None when the field passed)."""
        return self._error_messages
from typing import Dict, Any, Optional, List
class ValidationException(Exception):
    """Base exception for rule-validation failures.

    Renders a human-readable message from a (mode, template) pair, optionally
    overridden by caller-supplied translated templates.
    """

    MODE_DEFAULT = 'default'
    MODE_NEGATIVE = 'negative'
    STANDARD = 'standard'

    # Class-level defaults; __init__ rebinds the per-instance state.
    _input = None
    _id: str = ''
    _mode = 'default'
    _params: Dict[str, Any] = {}
    _template = 'standard'
    _message = None
    # Built-in message templates, keyed by mode and then template name.
    _default_templates = {
        'default': {
            'standard': '{name} must be valid'
        },
        'negative': {
            'standard': '{name} must not be valid'
        },
    }
    _translated_templates: Optional[Dict[str, Any]] = None

    def __init__(self, input, _id, params, translation: Optional[Dict[str, Any]] = None):
        self._mode = self.MODE_DEFAULT
        self._exceptions: List[Any] = list()
        self._input = input
        self._id = _id
        self._params = params
        self._translated_templates = translation
        self._template = self.choose_template()
        # If no display name was supplied, fall back to the quoted input value.
        if not self._params.get('name', False):
            self._params['name'] = '"'+str(input)+'"'
        super().__init__(self._create_message().format(**params))

    def choose_template(self) -> str:
        """Return the first template name defined for the current mode."""
        return list(self._default_templates[self._mode].keys())[0]

    def refresh_template(self) -> 'ValidationException':
        """Re-pick the template after a mode change; returns self for chaining."""
        self._template = self.choose_template()
        return self

    def _create_message(self) -> str:
        """Resolve the raw (unformatted) message template.

        Unknown template names are treated as literal message text; translated
        templates, when present, take precedence over the defaults.
        """
        if not self._default_templates[self._mode].get(self._template):
            return self._template
        if self._translated_templates and self._translated_templates.get(self._mode) and \
                self._translated_templates[self._mode].get(self._template):
            return str(self._translated_templates[self._mode][self._template])
        return self._default_templates[self._mode][self._template]

    def get_message(self) -> str:
        """Fully formatted message (delegates to __str__)."""
        return str(self)

    def get_id(self) -> str:
        return self._id

    def get_params(self) -> Dict[str, Any]:
        return self._params

    def get_param(self, name: str):
        return self._params.get(name, None)

    def set_param(self, param_name: str, param_val) -> 'ValidationException':
        self._params[param_name] = param_val
        return self

    def update_params(self, params: Dict[str, Any]) -> None:
        self._params = params
        self._message = self._create_message()
        return

    def update_mode(self, mode: str) -> None:
        self._mode = mode
        self._message = self._create_message()
        return

    def update_template(self, template: str) -> None:
        self._template = template
        self._message = self._create_message()
        return

    def has_customer_template(self) -> bool:
        """True when the current template name exists for the current mode."""
        return bool(self._default_templates[self._mode].get(self._template, False))

    def __str__(self):
        # NOTE(review): _message (written by the update_* methods) is never
        # read here -- the message is recomputed on every str() call. Confirm
        # whether _message is consumed elsewhere.
        return str(self._create_message().format(**self._params))
import re
from os.path import getsize
from typing import Optional, Union
from respect_validation.Exceptions import ComponentException
from respect_validation.Rules.AbstractRule import AbstractRule
class Size(AbstractRule):
    """Validates that the file at a given path has a size within the
    configured bounds, expressed as strings such as ``"1kb"`` or ``"2.5mb"``.
    """

    _min_size = None   # raw lower bound as given, e.g. "1kb"
    _max_size = None   # raw upper bound as given, e.g. "2mb"
    _min_value = None  # lower bound converted to bytes
    _max_value = None  # upper bound converted to bytes

    def __init__(self, min_size: Optional[str] = None, max_size: Optional[str] = None):
        """Configure the bounds; at least one of min_size/max_size is required.

        Raises:
            ComponentException: when neither bound is given, a bound is not a
                recognized size string, or min exceeds max.
        """
        super().__init__()
        if min_size is not None and isinstance(min_size, str):
            self._min_size = min_size
            self._min_value = self._to_bytes(min_size)
        if max_size is not None and isinstance(max_size, str):
            self._max_size = max_size
            self._max_value = self._to_bytes(max_size)
        if self._min_value is None and self._max_value is None:
            raise ComponentException("Set correct file size, for example 1kb, 2mb, 3gb") from None
        if self._min_value and self._max_value and self._min_value > self._max_value:
            raise ComponentException("Minimum value must be less than or equals to maximum") from None
        self.set_param('min_size', min_size)
        self.set_param('max_size', max_size)
        self.set_param('max_value', self._max_value)
        self.set_param('min_value', self._min_value)

    def validate(self, input_val) -> bool:
        """True when input_val is a path whose file size falls in the bounds."""
        if isinstance(input_val, str):
            return self._is_valid_size(getsize(input_val))
        return False

    def _to_bytes(self, size: str) -> float:
        """Convert a size string like '1.5mb' to bytes (1 unit = 1024**n).

        Raises:
            ComponentException: when the string is not a recognized size.
        """
        value: Union[float, None] = None
        units = ['b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
        for exponent, unit in enumerate(units):
            # Fixed: the decimal point must be escaped. The previous pattern
            # used a bare `.`, which accepted inputs such as "1x5kb" and then
            # crashed in float() instead of raising ComponentException.
            probe = re.match(re.compile(r'^(\d+(\.\d+)?)' + unit + '$', re.IGNORECASE), size)
            if not probe:
                continue
            value = float(probe.groups()[0]) * 1024 ** exponent
            break
        if not isinstance(value, float):
            raise ComponentException('"{}" is not a recognized file size.'.format(size))
        return value

    def _is_valid_size(self, size: float) -> bool:
        """Check a byte count against whichever bounds were configured."""
        if self._min_value is not None and self._max_value is not None:
            return self._min_value <= size <= self._max_value
        if self._min_value is not None:
            return size >= self._min_value
        if self._max_value is not None:
            return size <= self._max_value
        return False
from typing import List, Union
from respect_validation.Exceptions import ComponentException
from respect_validation.Rules.AbstractWrapper import AbstractWrapper
from respect_validation.Rules.AllOf import AllOf
from respect_validation.Rules.Key import Key
class KeySet(AbstractWrapper):
    """Validates that a dict contains exactly the given keys (no extras),
    delegating each key's value check to the wrapped Key rules.
    """

    _keys: List[str]        # references (key names) extracted from the rules
    _key_rules: List[Key]   # the unwrapped Key rules themselves

    def __init__(self, *keys: Key):
        self._key_rules = [self._get_key_rule(v) for v in keys]
        self._keys = [self._get_key_reference(k) for k in self._key_rules]
        # Delegate the per-key validation to an AllOf over the Key rules.
        super().__init__(AllOf(*self._key_rules))
        self.set_param('keys', self._keys)

    def claim(self, input_val) -> None:
        # Structure (exact key set) is checked before per-key rules run.
        if not self._has_valid_structure(input_val):
            raise self.report_error(input_val) from None
        super().claim(input_val)

    def check(self, input_val) -> None:
        if not self._has_valid_structure(input_val):
            raise self.report_error(input_val) from None
        super().check(input_val)

    def validate(self, input_val) -> bool:
        if not self._has_valid_structure(input_val):
            return False
        return super().validate(input_val)

    def _get_key_rule(self, validatable: Union[Key, AllOf]) -> 'Key':
        """Unwrap a Key rule, accepting a single-rule AllOf wrapper around one."""
        if isinstance(validatable, Key):
            return validatable
        if not isinstance(validatable, AllOf) or len(validatable.get_rules()) != 1:
            raise ComponentException('KeySet rule accepts only Key rules') from None
        return self._get_key_rule(validatable.get_rules()[0])

    def _get_key_reference(self, rule: Key):
        """Key name the rule refers to."""
        return rule.get_reference()

    def _has_valid_structure(self, input_val) -> bool:
        """True when every mandatory key is present and no unknown keys remain."""
        if not isinstance(input_val, dict):
            return False
        temp_input = input_val.copy()
        for key_rule in self._key_rules:
            if key_rule.get_reference() not in temp_input.keys() and key_rule.is_mandatory():
                return False
            if key_rule.get_reference() in temp_input.keys():
                del temp_input[key_rule.get_reference()]
        # Anything left over is a key not covered by any rule.
        return len(temp_input.keys()) == 0
from ipaddress import ip_address, ip_network
from typing import Optional
from respect_validation.Exceptions import ComponentException
from respect_validation.Rules.AbstractRule import AbstractRule
class Ip(AbstractRule):
    """Validates IPv4/IPv6 addresses, optionally constrained to a range
    ('min - max' or CIDR 'a.b.c.d/nn') and/or to private addresses.
    """

    _ip_range = None  # raw range spec as given by the caller ('*' = any)
    _private = None   # when True, only private addresses are accepted

    def __init__(self, ip_range: str = '*', private: bool = False):
        super().__init__()
        self._ip_range = ip_range
        self._private = private
        # For the '*' wildcard, _parse_range_and_validate returns False, so
        # the 'ip_range' param is only recorded when a real range was given.
        if self._parse_range_and_validate(ip_range):
            self.set_param('ip_range', ip_range)

    def validate(self, input_val: str):
        try:
            ip_addr = ip_address(input_val)
        except Exception:
            # Not parseable as an IP address at all.
            self.set_param('ip_range', None)
            return False
        if self._private and not ip_addr.is_private:
            self.set_param('must_be_private', True)
            return False
        if not self.get_param('ip_range'):
            # No range constraint: any syntactically valid address passes.
            return True
        return self._parse_range_and_validate(self._ip_range, input_val)  # type: ignore

    def _parse_range_and_validate(self, ip_range: str, ip_addr: Optional[str] = None):
        """Parse a 'min - max' or CIDR range; with ip_addr given, test membership.

        Returns False for the '*' wildcard (meaning: no range to record).

        Raises:
            ComponentException: when the range spec itself is invalid, or the
                address and range mix IPv4 with IPv6.
        """
        if ip_range == '*':
            return False
        if '-' in ip_range:
            try:
                min_ip = ip_address(str(ip_range.split('-')[0].strip()))
                max_ip = ip_address(str(ip_range.split('-')[1].strip()))
            except Exception:
                raise ComponentException('Invalid network range') from None
            # Exact-type comparison distinguishes IPv4Address from IPv6Address.
            if ip_addr is not None and \
                    (type(min_ip) != type(max_ip) or type(min_ip) != type(ip_address(ip_addr))):  # type: ignore
                raise ComponentException('Incompatible version of IP protocol') from None
            if ip_addr is not None:
                return min_ip <= ip_address(ip_addr) <= max_ip  # type: ignore
            return True
        if '/' in ip_range:
            try:
                network = ip_network(ip_range)
            except Exception:
                raise ComponentException('Invalid network range') from None
            if ip_addr:
                return ip_address(ip_addr) in network
            return True
        raise ComponentException('Invalid network range') from None
from respect_validation.Exceptions import ComponentException
from respect_validation.Rules.AbstractEnvelope import AbstractEnvelope
from respect_validation.Rules.CountryCode import CountryCode
from respect_validation.Rules.Regex import Regex
class PostalCode(AbstractEnvelope):
    """Validates a postal code for a given ISO 3166-1 alpha-2 country code,
    by delegating to a Regex rule selected from POSTAL_CODES.
    """

    # NOTE(review): appears unused within this class -- confirm whether it is
    # referenced elsewhere before removing.
    DEFAULT_PATTERN = r'^$'
    # Country code -> postal-code regex.
    POSTAL_CODES = {
        'AD': r'^(?:AD)*(\d{3})$',
        'AL': r'^(\d{4})$',
        'AM': r'^(\d{4})$',
        'AR': r'^[A-Z]?\d{4}[A-Z]{0,3}$',
        'AS': r'96799',
        'AT': r'^(\d{4})$',
        'AU': r'^(\d{4})$',
        'AX': r'^(?:FI)*(\d{5})$',
        'AZ': r'^(?:AZ)*(\d{4})$',
        'BA': r'^(\d{5})$',
        'BB': r'^(?:BB)*(\d{5})$',
        'BD': r'^(\d{4})$',
        'BE': r'^(\d{4})$',
        'BG': r'^(\d{4})$',
        'BH': r'^(\d{3}\d?)$',
        'BL': r'^(\d{5})$',
        'BM': r'^([A-Z]{2}\d{2})$',
        'BN': r'^([A-Z]{2}\d{4})$',
        'BR': r'^\d{5}-?\d{3}$',
        'BY': r'^(\d{6})$',
        'CA': r'^([ABCEGHJKLMNPRSTVXY]\d[ABCEGHJKLMNPRSTVWXYZ]) ?(\d[ABCEGHJKLMNPRSTVWXYZ]\d)$',
        'CH': r'^(\d{4})$',
        'CL': r'^(\d{7})$',
        'CN': r'^(\d{6})$',
        'CO': r'^(\d{6})$',
        'CR': r'^(\d{5})$',
        'CS': r'^(\d{5})$',
        'CU': r'^(?:CP)*(\d{5})$',
        'CV': r'^(\d{4})$',
        'CX': r'^(\d{4})$',
        'CY': r'^(\d{4})$',
        'CZ': r'^\d{3}\s?\d{2}$',
        'DE': r'^(\d{5})$',
        'DK': r'^(\d{4})$',
        'DO': r'^(\d{5})$',
        'DZ': r'^(\d{5})$',
        'EC': r'^(\d{6})$',
        'EE': r'^(\d{5})$',
        'EG': r'^(\d{5})$',
        'ES': r'^(\d{5})$',
        'ET': r'^(\d{4})$',
        'FI': r'^(?:FI)*(\d{5})$',
        'FM': r'^(\d{5})$',
        'FO': r'^(?:FO)*(\d{3})$',
        'FR': r'^(\d{5})$',
        'GB': r'^([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y][0-9]{1,2})|(([A-Za-z][0-9]['
              r'A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z])))) [0-9][A-Za-z]{2})$',
        'GE': r'^(\d{4})$',
        'GF': r'^((97|98)3\d{2})$',
        'GG': r'^((?:(?:[A-PR-UWYZ][A-HK-Y]\d[ABEHMNPRV-Y0-9]|[A-PR-UWYZ]\d[A-HJKPS-UW0-9])\s\d[ABD-HJLNP-UW-Z]{'
              r'2})|GIR\s?0AA)$',
        'GL': r'^(\d{4})$',
        'GP': r'^((97|98)\d{3})$',
        'GR': r'^(\d{3}\s?\d{2})$',
        'GT': r'^(\d{5})$',
        'GU': r'^(969\d{2})$',
        'GW': r'^(\d{4})$',
        'HN': r'^([A-Z]{2}\d{4})$',
        'HR': r'^(?:HR)*(\d{5})$',
        'HT': r'^(?:HT)*(\d{4})$',
        'HU': r'^(\d{4})$',
        'ID': r'^(\d{5})$',
        'IE': r'^(D6W|[AC-FHKNPRTV-Y][0-9]{2})\s?([AC-FHKNPRTV-Y0-9]{4})',
        'IL': r'^(\d{7}|\d{5})$',
        'IM': r'^((?:(?:[A-PR-UWYZ][A-HK-Y]\d[ABEHMNPRV-Y0-9]|[A-PR-UWYZ]\d[A-HJKPS-UW0-9])\s\d[ABD-HJLNP-UW-Z]{'
              r'2})|GIR\s?0AA)$',
        'IN': r'^(\d{6})$',
        'IQ': r'^(\d{5})$',
        'IR': r'^(\d{10})$',
        'IS': r'^(\d{3})$',
        'IT': r'^(\d{5})$',
        'JE': r'^((?:(?:[A-PR-UWYZ][A-HK-Y]\d[ABEHMNPRV-Y0-9]|[A-PR-UWYZ]\d[A-HJKPS-UW0-9])\s\d[ABD-HJLNP-UW-Z]{'
              r'2})|GIR\s?0AA)$',
        'JO': r'^(\d{5})$',
        'JP': r'^\d{3}-\d{4}$',
        'KE': r'^(\d{5})$',
        'KG': r'^(\d{6})$',
        'KH': r'^(\d{5})$',
        'KP': r'^(\d{6})$',
        'KR': r'^(\d{5})$',
        'KW': r'^(\d{5})$',
        'KY': r'^KY[1-3]-\d{4}$',
        'KZ': r'^(\d{6})$',
        'LA': r'^(\d{5})$',
        'LB': r'^(\d{4}(\d{4})?)$',
        'LI': r'^(\d{4})$',
        'LK': r'^(\d{5})$',
        'LR': r'^(\d{4})$',
        'LS': r'^(\d{3})$',
        'LT': r'^(?:LT)*(\d{5})$',
        'LU': r'^(?:L-)?\d{4}$',
        'LV': r'^(?:LV)*(\d{4})$',
        'MA': r'^(\d{5})$',
        'MC': r'^(\d{5})$',
        'MD': r'^MD-\d{4}$',
        'ME': r'^(\d{5})$',
        'MF': r'^(\d{5})$',
        'MG': r'^(\d{3})$',
        'MH': r'^969\d{2}(-\d{4})$',
        'MK': r'^(\d{4})$',
        'MM': r'^(\d{5})$',
        'MN': r'^(\d{6})$',
        'MP': r'^9695\d{1}$',
        'MQ': r'^(\d{5})$',
        'MT': r'^[A-Z]{3}\s?\d{4}$',
        'MV': r'^(\d{5})$',
        'MW': r'^(\d{6})$',
        'MX': r'^(\d{5})$',
        'MY': r'^(\d{5})$',
        'MZ': r'^(\d{4})$',
        'NC': r'^(\d{5})$',
        'NE': r'^(\d{4})$',
        'NF': r'^(\d{4})$',
        'NG': r'^(\d{6})$',
        'NI': r'^(\d{7})$',
        'NL': r'^(\d{4} ?[A-Z]{2})$',
        'NO': r'^(\d{4})$',
        'NP': r'^(\d{5})$',
        'NZ': r'^(\d{4})$',
        'OM': r'^(\d{3})$',
        'PF': r'^((97|98)7\d{2})$',
        'PG': r'^(\d{3})$',
        'PH': r'^(\d{4})$',
        'PK': r'^(\d{5})$',
        'PL': r'^\d{2}-\d{3}$',
        'PM': r'^(97500)$',
        'PR': r'^00[679]\d{2}(?:-\d{4})?$',
        'PT': r'^\d{4}-?\d{3}$',
        'PW': r'^(96940)$',
        'PY': r'^(\d{4})$',
        'RE': r'^((97|98)(4|7|8)\d{2})$',
        'RO': r'^(\d{6})$',
        'RS': r'^(\d{5})$',
        'RU': r'^(\d{6})$',
        'SA': r'^(\d{5})$',
        'SD': r'^(\d{5})$',
        'SE': r'^(?:SE)?\d{3}\s\d{2}$',
        'SG': r'^(\d{6})$',
        'SH': r'^(STHL1ZZ)$',
        'SI': r'^(?:SI)*(\d{4})$',
        'SJ': r'^(\d{4})$',
        'SK': r'^\d{3}\s?\d{2}$',
        'SM': r'^(4789\d)$',
        'SN': r'^(\d{5})$',
        'SO': r'^([A-Z]{2}\d{5})$',
        'SV': r'^(?:CP)*(\d{4})$',
        'SZ': r'^([A-Z]\d{3})$',
        'TC': r'^(TKCA 1ZZ)$',
        'TH': r'^(\d{5})$',
        'TJ': r'^(\d{6})$',
        'TM': r'^(\d{6})$',
        'TN': r'^(\d{4})$',
        'TR': r'^(\d{5})$',
        'TW': r'^(\d{5})$',
        'UA': r'^(\d{5})$',
        'US': r'^\d{5}(-\d{4})?$',
        'UY': r'^(\d{5})$',
        'UZ': r'^(\d{6})$',
        'VA': r'^(\d{5})$',
        'VE': r'^(\d{4})$',
        'VI': r'^008\d{2}(?:-\d{4})?$',
        'VN': r'^(\d{6})$',
        'WF': r'^(986\d{2})$',
        'YT': r'^(\d{5})$',
        'ZA': r'^(\d{4})$',
        'ZM': r'^(\d{5})$'
    }

    def __init__(self, country_code: str):
        """Select the regex for the given country.

        Raises:
            ComponentException: when the code is not a valid country code or
                has no registered postal-code pattern.
        """
        country_code_rule = CountryCode()
        if not country_code_rule.validate(country_code) or country_code not in self.POSTAL_CODES.keys():
            raise ComponentException('Cannot validate postal code from "{}" country'.format(country_code)) from None
        super().__init__(Regex(self.POSTAL_CODES[country_code]), {'country_code': country_code})
from typing import Optional, Any
from respect_validation.Exceptions import ValidationException
from respect_validation.Rules.AbstractRule import AbstractRule
class AbstractRelated(AbstractRule):
    """Base class for rules that validate something *related to* the input
    (e.g. a key or attribute), optionally delegating to an inner rule.
    """

    _reference: Any                       # the key/attribute being referenced
    _rule: Optional[AbstractRule] = None  # inner rule applied to the referenced value
    _mandatory: bool = True               # whether the reference must exist

    def __init__(self, reference: Any, rule: Optional[AbstractRule] = None, mandatory: bool = True):
        super().__init__()
        self._reference = reference
        self._rule = rule
        self._mandatory = mandatory
        # Prefer the inner rule's name; otherwise use the string reference.
        if rule and rule.get_name() is not None:
            self.set_name(rule.get_name())
        elif isinstance(reference, str):
            self.set_name(reference)

    # base method for new rule: subclasses report whether the reference exists.
    def has_reference(self, input_val) -> bool:
        return False

    # base method for new rule: subclasses extract the referenced value.
    def get_reference_value(self, input_val) -> bool:
        return False

    def get_reference(self):
        return self._reference

    def is_mandatory(self) -> bool:
        return self._mandatory

    def set_name(self, name) -> 'AbstractRelated':
        """Set this rule's name and propagate it to the inner rule."""
        super().set_name(name)
        if isinstance(self._rule, AbstractRule):
            self._rule.set_name(name)
        return self

    def claim(self, input_val) -> None:
        """Validate, wrapping an inner-rule failure in a nested exception."""
        has_reference = self.has_reference(input_val)
        if self._mandatory and not has_reference:
            raise self.report_error(input_val, {'has_reference': False})
        if self._rule is None or not has_reference:
            return
        try:
            self._rule.claim(self.get_reference_value(input_val))
        except ValidationException as e:
            # Wrap the inner failure so callers can see which reference failed.
            nested_validation_exception = self.report_error(self._reference, {'has_reference': True})
            nested_validation_exception.add_child(e)  # type: ignore
            raise nested_validation_exception

    def check(self, input_val) -> None:
        """Validate, letting the inner rule's exception propagate unwrapped."""
        has_reference = self.has_reference(input_val)
        if self._mandatory and not has_reference:
            raise self.report_error(input_val, {'has_reference': False})
        if self._rule is None or not has_reference:
            return
        self._rule.check(self.get_reference_value(input_val))

    def validate(self, input_val) -> bool:
        """Boolean variant: True when the reference/rule requirements hold."""
        has_reference = self.has_reference(input_val)
        if self._mandatory and not has_reference:
            return False
        if self._rule is None or not has_reference:
            return True
        return self._rule.validate(self.get_reference_value(input_val))
from typing import List, Type, Union
from django import forms
from django.forms import ModelForm
from edc_list_data.model_mixins import ListModelMixin
from respond_models.stubs import DrugSupplyNcdFormMixinStub as FormMixinStub
from ..utils import validate_total_days
class DrugSupplyNcdFormMixin:
    """Form mixin that validates inline drug-supply entries against the
    treatment selected on the parent form (the ``rx`` field).
    """

    # Concrete forms must set this to the drug list model class.
    list_model_cls: Type[ListModelMixin] = None

    def clean(self: Union[FormMixinStub, ModelForm]) -> dict:
        cleaned_data = super().clean()
        # Raw submitted form data as {name: [values]}; "rx" holds drug ids.
        data = dict(self.data.lists())
        rx = self.list_model_cls.objects.filter(id__in=data.get("rx") or [])
        rx_names = [obj.display_name for obj in rx]
        inline_drug_names = self.raise_on_duplicates()
        validate_total_days(self)
        # The drug chosen on this inline must be part of the selected treatment.
        if (
            self.cleaned_data.get("drug")
            and self.cleaned_data.get("drug").display_name not in rx_names
        ):
            treatment = " + ".join(rx_names)
            raise forms.ValidationError(
                f"Invalid. `{self.cleaned_data.get('drug').display_name}` "
                f"not in current treatment of `{treatment}`"
            )
        self.raise_on_missing_drug(rx_names, inline_drug_names)
        return cleaned_data

    def raise_on_duplicates(self: forms.ModelForm) -> list:
        """Collect the drug names across all inline forms, rejecting duplicates."""
        drug_names = []
        # Django formset management data: number of inline forms submitted.
        total_forms = self.data.get(f"{self.relation_label}_set-TOTAL_FORMS")
        for form_index in range(0, int(total_forms or 0)):
            inline_rx_id = self.data.get(f"{self.relation_label}_set-{form_index}-drug")
            if inline_rx_id:
                rx_obj = self.list_model_cls.objects.get(id=int(inline_rx_id))
                if rx_obj.display_name in drug_names:
                    raise forms.ValidationError("Invalid. Duplicates not allowed")
                drug_names.append(rx_obj.display_name)
        return drug_names

    @staticmethod
    def raise_on_missing_drug(rx_names: List[str], inline_drug_names: List[str]) -> None:
        """Every drug in the selected treatment must appear in the inlines."""
        for display_name in rx_names:
            if display_name not in inline_drug_names:
                raise forms.ValidationError(f"Missing drug. Also expected {display_name}.")
from django import forms
from edc_constants.constants import NO, OTHER, YES
from edc_form_validators import FormValidator
from edc_model.models import estimated_date_from_ago
from respond_models.constants import HIV_CLINIC
from ..utils import (
raise_if_both_ago_and_actual_date,
raise_if_clinical_review_does_not_exist,
)
class HivInitialReviewFormValidatorMixin(FormValidator):
    """Validates the HIV initial review CRF: care/clinic details, ART
    initiation, and the most recent viral load and CD4 results."""

    def clean(self):
        super().clean()
        raise_if_clinical_review_does_not_exist(self.cleaned_data.get("subject_visit"))
        # Diagnosis date may be given as an exact date OR an "ago" duration, not both.
        raise_if_both_ago_and_actual_date(
            dx_ago=self.cleaned_data.get("dx_ago"), dx_date=self.cleaned_data.get("dx_date")
        )
        self.match_screening_clinic_or_raise()
        self.applicable_if(YES, field="receives_care", field_applicable="clinic")
        self.required_if(OTHER, field="clinic", field_required="clinic_other")
        self.required_if(YES, field="receives_care", field_required="arv_initiated")
        self.validate_art_initiation_date()
        self.required_if(YES, field="arv_initiated", field_required="has_vl")
        self.validate_viral_load()
        self.required_if(YES, field="arv_initiated", field_required="has_cd4")
        self.validate_cd4()

    def match_screening_clinic_or_raise(self):
        """Patients screened from an HIV clinic must report receiving care."""
        # NOTE(review): `self.subject_screening` is provided by the concrete
        # validator class -- confirm against the implementing form validator.
        if (
            self.subject_screening.clinic_type in [HIV_CLINIC]
            and self.cleaned_data.get("receives_care") != YES
        ):
            raise forms.ValidationError(
                {
                    "receives_care": (
                        "Patient was screened from an HIV clinic, expected `Yes`."
                    ),
                }
            )

    def validate_art_initiation_date(self):
        """Require an initiation date (exact or estimated) when ART was
        initiated, and forbid an initiation date before the HIV diagnosis."""
        self.not_required_if(
            NO,
            field="arv_initiated",
            field_required="arv_initiation_ago",
            inverse=False,
        )
        self.not_required_if(
            NO,
            field="arv_initiated",
            field_required="arv_initiation_actual_date",
            inverse=False,
        )
        # Fixed: previously read `self.cleaned_data.get("art_initiated")` -- a
        # typo for `arv_initiated` (the field name used everywhere else in this
        # validator), so this requirement could never fire.
        if self.cleaned_data.get("arv_initiated") == YES and not (
            self.cleaned_data.get("arv_initiation_ago")
            or self.cleaned_data.get("arv_initiation_actual_date")
        ):
            raise forms.ValidationError(
                {"arv_initiation_actual_date": "This field is required (or the above)."}
            )
        raise_if_both_ago_and_actual_date(
            dx_ago=self.cleaned_data.get("arv_initiation_ago"),
            dx_date=self.cleaned_data.get("arv_initiation_actual_date"),
        )
        if self.arv_initiation_date and self.dx_date:
            if self.arv_initiation_date < self.dx_date:
                # Attach the error to whichever field the user actually filled in.
                field = self.which_field(
                    ago_field="arv_initiation_ago",
                    date_field="arv_initiation_actual_date",
                )
                raise forms.ValidationError(
                    {field: "Invalid. Cannot start ART before HIV diagnosis."}
                )

    def validate_viral_load(self):
        """VL value/quantifier/date are required if a VL is reported, and
        the VL date cannot precede the HIV diagnosis."""
        self.required_if(YES, field="has_vl", field_required="vl")
        self.required_if(YES, field="has_vl", field_required="vl_quantifier")
        self.required_if(YES, field="has_vl", field_required="vl_date")
        if self.cleaned_data.get("vl_date") and self.dx_date:
            if self.cleaned_data.get("vl_date") < self.dx_date:
                raise forms.ValidationError(
                    {"vl_date": "Invalid. Cannot be before HIV diagnosis."}
                )

    def validate_cd4(self):
        """CD4 value/date are required if a CD4 is reported, and the CD4
        date cannot precede the HIV diagnosis."""
        self.required_if(YES, field="has_cd4", field_required="cd4")
        self.required_if(YES, field="has_cd4", field_required="cd4_date")
        if self.cleaned_data.get("cd4_date") and self.dx_date:
            if self.cleaned_data.get("cd4_date") < self.dx_date:
                raise forms.ValidationError(
                    {"cd4_date": "Invalid. Cannot be before HIV diagnosis."}
                )

    @property
    def dx_date(self):
        """Diagnosis date: estimated from `dx_ago` if given, else the exact date."""
        if self.cleaned_data.get("dx_ago"):
            return estimated_date_from_ago(data=self.cleaned_data, ago_field="dx_ago")
        return self.cleaned_data.get("dx_date")

    @property
    def arv_initiation_date(self):
        """ART initiation date: estimated from the 'ago' field if given,
        else the exact date."""
        if self.cleaned_data.get("arv_initiation_ago"):
            return estimated_date_from_ago(
                data=self.cleaned_data, ago_field="arv_initiation_ago"
            )
        return self.cleaned_data.get("arv_initiation_actual_date")

    def which_field(self, ago_field=None, date_field=None):
        """Return whichever of the two fields has data, else None."""
        if self.cleaned_data.get(ago_field):
            return ago_field
        if self.cleaned_data.get(date_field):
            return date_field
        return None
from edc_constants.constants import YES
from edc_form_validators.form_validator import FormValidator
from edc_lab.form_validators import CrfRequisitionFormValidatorMixin
from edc_reportable import GRADE3, GRADE4, ReportablesFormValidatorMixin
class BloodResultsFormValidatorMixin(
    ReportablesFormValidatorMixin, CrfRequisitionFormValidatorMixin, FormValidator
):
    """Validates blood-result CRFs: requisition required when any result is
    entered, per-field units/abnormal/reportable answers, and reportable
    (grade 3/4) checks.

    Concrete validators configure `requisition_field`, `assay_datetime_field`,
    `field_names`, `panels` and `poc_panels`.
    """

    reportable_grades = [GRADE3, GRADE4]
    reference_list_name = "meta"
    requisition_field = None
    assay_datetime_field = None
    # Class-level configuration defaults; overridden (not mutated) by subclasses.
    field_names = []
    panels = []
    poc_panels = []

    @property
    def field_values(self):
        """One boolean per result field: was a value entered?"""
        # Fixed idiom: dropped the redundant inner `[f for f in ...]` comprehension.
        return [self.cleaned_data.get(f) is not None for f in self.field_names]

    @property
    def extra_options(self):
        """Hook for subclasses to pass extra kwargs to the reportables check."""
        return {}

    def clean(self):
        self.required_if_true(any(self.field_values), field_required=self.requisition_field)
        # Point-of-care results validate against the POC panels; `get()` returning
        # None compares unequal to YES, so a single comparison suffices.
        if self.cleaned_data.get("is_poc") == YES:
            panels = self.poc_panels
        else:
            panels = self.panels
        self.validate_requisition(self.requisition_field, self.assay_datetime_field, *panels)
        # Each companion field (units/abnormal/reportable) is required whenever
        # the result value itself is present.
        for field_name in self.field_names:
            for suffix in ("units", "abnormal", "reportable"):
                if f"{field_name}_{suffix}" in self.cleaned_data:
                    self.required_if_not_none(
                        field=field_name,
                        field_required=f"{field_name}_{suffix}",
                        field_required_evaluate_as_int=True,
                    )
        self.validate_reportable_fields(
            reference_list_name=self.reference_list_name, **self.extra_options
        )
from datetime import date, datetime
from typing import List, Protocol, Union
from django.db import models
from edc_crf.stubs import MetaModelStub
from edc_list_data.stubs import ListModelMixinStub
from edc_model import models as edc_models
from edc_visit_tracking.stubs import SubjectVisitModelStub
class ClinicalReviewBaselineModelStub(Protocol):
    """Typing stub for the clinical review baseline model: per-condition
    (dm/hiv/htn) diagnosis flags and test-date fields."""

    subject_visit: SubjectVisitModelStub
    report_datetime: Union[datetime, models.DateTimeField]
    dm_dx: models.CharField
    dm_test_ago: edc_models.DurationYMDField
    dm_test_date: models.DateField
    dm_test_estimated_date: models.DateTimeField
    hiv_dx: models.CharField
    hiv_test_ago: edc_models.DurationYMDField
    hiv_test_date: models.DateField
    hiv_test_estimated_date: models.DateTimeField
    htn_dx: models.CharField
    htn_test_ago: edc_models.DurationYMDField
    htn_test_date: models.DateField
    htn_test_estimated_date: models.DateTimeField
    site: models.Manager
    history: models.Manager
    objects: models.Manager
    _meta: MetaModelStub
class ClinicalReviewModelStub(Protocol):
    """Typing stub for follow-up clinical review models (no `_test_ago`
    fields -- those exist only at baseline)."""

    diagnoses_labels: dict
    subject_visit: SubjectVisitModelStub
    report_datetime: Union[datetime, models.DateTimeField]
    dm_dx: models.CharField
    dm_test_date: models.DateField
    dm_test_estimated_date: models.DateTimeField
    hiv_dx: models.CharField
    hiv_test_date: models.DateField
    hiv_test_estimated_date: models.DateTimeField
    htn_dx: models.CharField
    htn_test_date: models.DateField
    htn_test_estimated_date: models.DateTimeField
    site: models.Manager
    history: models.Manager
    objects: models.Manager
    _meta: MetaModelStub
class InitialReviewModelStub(Protocol):
    """Typing stub for condition "initial review" models: the diagnosis
    date is captured exactly (`dx_date`) or estimated from `dx_ago`."""

    subject_visit: SubjectVisitModelStub
    report_datetime: Union[datetime, models.DateTimeField]
    dx_ago: str
    dx_date: date
    dx_estimated_date: date
    dx_date_estimated: str
    site: models.Manager
    history: models.Manager
    objects: models.Manager
    _meta: MetaModelStub

    def get_best_dx_date(self) -> Union[date, datetime]:
        """Exact diagnosis date if known, else the estimated date."""
        ...
class NcdInitialReviewModelStub(Protocol):
    """Typing stub for NCD initial review models; extends the initial
    review shape with medication start fields."""

    ncd_condition_label: str
    subject_visit: SubjectVisitModelStub
    report_datetime: Union[datetime, models.DateTimeField]
    dx_ago: str
    dx_date: date
    dx_estimated_date: date
    dx_date_estimated: str
    med_start_ago: str
    med_start_estimated_date: date
    med_start_date_estimated: str
    site: models.Manager
    history: models.Manager
    objects: models.Manager
    _meta: MetaModelStub
class DrugSupplyNcdFormMixinStub(Protocol):
    """Typing stub for DrugSupplyNcdFormMixin."""

    cleaned_data: dict
    data: dict
    list_model_cls: ListModelMixinStub

    def clean(self) -> dict:
        ...

    def raise_on_duplicates(self) -> list:
        ...

    @staticmethod
    def raise_on_missing_drug(rx_names: List[str], inline_drug_names: List[str]) -> None:
        # Fixed: the concrete mixin declares `-> None` (it only raises or
        # returns nothing); this stub previously said `-> list`.
        ...
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models.deletion import PROTECT
from edc_constants.choices import YES_NO
from edc_model.models import datetime_not_future
from edc_reportable.choices import REPORTABLE
from edc_reportable.units import MILLIMOLES_PER_LITER
from ...constants import BLOOD_RESULTS_LIPID_ACTION
class BloodResultsLipidModelMixin(models.Model):
    """Model mixin for the lipid-panel blood results CRF.

    Links the result to a requisition and captures, for each analyte
    (LDL, HDL, triglycerides, total cholesterol), a quartet of fields:
    value, units (mmol/L only), abnormal (yes/no) and reportable (grade).
    """

    action_name = BLOOD_RESULTS_LIPID_ACTION
    tracking_identifier_prefix = "LP"

    lipid_requisition = models.ForeignKey(
        settings.SUBJECT_REQUISITION_MODEL,
        on_delete=PROTECT,
        related_name="lipid",
        verbose_name="Requisition",
        null=True,
        blank=True,
        help_text="Start typing the requisition identifier or select one from this visit",
    )
    lipid_assay_datetime = models.DateTimeField(
        verbose_name="Result Report Date and Time",
        validators=[datetime_not_future],
        null=True,
        blank=True,
    )

    # LDL cholesterol
    ldl = models.DecimalField(
        validators=[MinValueValidator(0), MaxValueValidator(999)],
        verbose_name="LDL",
        max_digits=8,
        decimal_places=2,
        null=True,
        blank=True,
    )
    ldl_units = models.CharField(
        verbose_name="units",
        max_length=15,
        choices=((MILLIMOLES_PER_LITER, MILLIMOLES_PER_LITER),),
        null=True,
        blank=True,
    )
    ldl_abnormal = models.CharField(
        verbose_name="abnormal", choices=YES_NO, max_length=25, null=True, blank=True
    )
    ldl_reportable = models.CharField(
        verbose_name="reportable",
        choices=REPORTABLE,
        max_length=25,
        null=True,
        blank=True,
    )

    # HDL cholesterol
    hdl = models.DecimalField(
        validators=[MinValueValidator(0), MaxValueValidator(999)],
        verbose_name="HDL",
        max_digits=8,
        decimal_places=2,
        null=True,
        blank=True,
    )
    hdl_units = models.CharField(
        verbose_name="units",
        max_length=15,
        choices=((MILLIMOLES_PER_LITER, MILLIMOLES_PER_LITER),),
        null=True,
        blank=True,
    )
    hdl_abnormal = models.CharField(
        verbose_name="abnormal", choices=YES_NO, max_length=25, null=True, blank=True
    )
    hdl_reportable = models.CharField(
        verbose_name="reportable",
        choices=REPORTABLE,
        max_length=25,
        null=True,
        blank=True,
    )

    # Triglycerides
    trig = models.DecimalField(
        validators=[MinValueValidator(0), MaxValueValidator(999)],
        verbose_name="Triglycerides",
        max_digits=8,
        decimal_places=2,
        null=True,
        blank=True,
    )
    trig_units = models.CharField(
        verbose_name="units",
        max_length=15,
        choices=((MILLIMOLES_PER_LITER, MILLIMOLES_PER_LITER),),
        null=True,
        blank=True,
    )
    trig_abnormal = models.CharField(
        verbose_name="abnormal", choices=YES_NO, max_length=25, null=True, blank=True
    )
    trig_reportable = models.CharField(
        verbose_name="reportable",
        choices=REPORTABLE,
        max_length=25,
        null=True,
        blank=True,
    )

    # Total cholesterol
    chol = models.DecimalField(
        validators=[MinValueValidator(0), MaxValueValidator(999)],
        verbose_name="Cholesterol",
        max_digits=8,
        decimal_places=2,
        null=True,
        blank=True,
    )
    chol_units = models.CharField(
        verbose_name="units",
        max_length=15,
        choices=((MILLIMOLES_PER_LITER, MILLIMOLES_PER_LITER),),
        null=True,
        blank=True,
    )
    chol_abnormal = models.CharField(
        verbose_name="abnormal", choices=YES_NO, max_length=25, null=True, blank=True
    )
    chol_reportable = models.CharField(
        verbose_name="reportable",
        choices=REPORTABLE,
        max_length=25,
        null=True,
        blank=True,
    )

    class Meta:
        abstract = True
        verbose_name = "Blood Result: Lipids"
        verbose_name_plural = "Blood Results: Lipids"
from datetime import date
from django.db import models
from edc_constants.choices import YES_NO
from edc_constants.constants import YES
from edc_model import models as edc_models
from ...diagnoses import Diagnoses
from ...stubs import InitialReviewModelStub
from ...utils import calculate_dx_date_if_estimated
class InitialReviewModelError(Exception):
    """Raised when an initial review is saved without a corresponding
    diagnosis recorded on the clinical review."""

    pass
class InitialReviewModelMixin(models.Model):
    """Common fields/logic for condition "initial review" models.

    The diagnosis date is captured either exactly (`dx_date`) or as an
    estimated "ago" duration (`dx_ago`); the estimated date fields are
    derived on save.
    """

    dx_ago = edc_models.DurationYMDField(
        verbose_name="How long ago was the patient diagnosed?",
        null=True,
        blank=True,
        help_text="If possible, provide the exact date below instead of estimating here.",
    )
    dx_date = models.DateField(
        verbose_name="Date patient diagnosed",
        null=True,
        blank=True,
        help_text="If possible, provide the exact date here instead of estimating above.",
    )
    dx_estimated_date = models.DateField(
        verbose_name="Estimated diagnoses date",
        null=True,
        help_text="Calculated based on response to `dx_ago`",
        editable=False,
    )
    dx_date_estimated = models.CharField(
        verbose_name="Was the diagnosis date estimated?",
        max_length=15,
        choices=YES_NO,
        default=YES,
        editable=False,
    )

    def save(self: InitialReviewModelStub, *args, **kwargs):
        """Refuse to save unless a diagnosis is recorded on the clinical
        review, then derive the estimated-date fields."""
        diagnoses = Diagnoses(
            subject_identifier=self.subject_visit.subject_identifier,
            report_datetime=self.subject_visit.report_datetime,
            lte=True,
        )
        # Idiom fix: `x != YES` instead of `not x == YES` (same behavior).
        if diagnoses.get_dx_by_model(self) != YES:
            raise InitialReviewModelError(
                "No diagnosis has been recorded. See clinical review. "
                "Perhaps catch this in the form."
            )
        self.dx_estimated_date, self.dx_date_estimated = calculate_dx_date_if_estimated(
            self.dx_date,
            self.dx_ago,
            self.report_datetime,
        )
        super().save(*args, **kwargs)  # type: ignore

    def get_best_dx_date(self) -> date:
        """Exact diagnosis date if entered, else the estimated date."""
        return self.dx_date or self.dx_estimated_date

    class Meta:
        abstract = True
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.safestring import mark_safe
from edc_constants.choices import YES_NO, YES_NO_NA
from edc_constants.constants import NOT_APPLICABLE
from edc_model import models as edc_models
from ...constants import RESPOND_DIAGNOSIS_LABELS
from ...stubs import ClinicalReviewModelStub
class ClinicalReviewModelMixin(models.Model):
    """Base mixin for the follow-up clinical review CRF."""

    diagnoses_labels = RESPOND_DIAGNOSIS_LABELS

    complications = models.CharField(
        verbose_name="Since last seen, has the patient had any complications",
        max_length=15,
        choices=YES_NO,
        help_text="If Yes, complete the `Complications` CRF",
    )

    def get_best_test_date(self: ClinicalReviewModelStub, prefix: str):
        """Return `<prefix>_test_date` if set, else the estimated date.

        Fixed: the fallback previously read `<prefix>_test_estimated_datetime`,
        but the models define `<prefix>_test_estimated_date` (see the baseline
        mixins and stubs), so `getattr(..., None)` always returned None.
        """
        return getattr(self, f"{prefix}_test_date", None) or getattr(
            self, f"{prefix}_test_estimated_date", None
        )

    @property
    def diagnoses(self: ClinicalReviewModelStub) -> dict:
        """Map each condition label to the value of its `<label>_dx` field."""
        if not self.diagnoses_labels:
            raise ImproperlyConfigured("Settings attribute RESPOND_DIAGNOSIS_LABELS not set.")
        return {k: getattr(self, f"{k}_dx") for k in self.diagnoses_labels}

    class Meta:
        abstract = True
        verbose_name = "Clinical Review"
        verbose_name_plural = "Clinical Review"
class ClinicalReviewHivModelMixin(models.Model):
    """Follow-up clinical review fields for HIV: testing since last seen,
    reason for testing, and any new diagnosis as of today."""

    hiv_test = models.CharField(
        verbose_name="Since last seen, was the patient tested for HIV infection?",
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
        help_text=mark_safe(
            "Note: Select `not applicable` if diagnosis previously reported. <BR>"
            "`Since last seen` includes today.<BR>"
            "If `yes', complete the initial review CRF<BR>"
            "If `not applicable`, complete the review CRF."
        ),
    )
    hiv_test_date = models.DateField(
        verbose_name="Date test requested",
        null=True,
        blank=True,
    )
    hiv_reason = models.ManyToManyField(
        f"{settings.LIST_MODEL_APP_LABEL}.reasonsfortesting",
        related_name="hiv_test_reason",
        verbose_name="Why was the patient tested for HIV infection?",
        blank=True,
    )
    hiv_reason_other = edc_models.OtherCharField()
    hiv_dx = models.CharField(
        verbose_name=mark_safe(
            "As of today, was the patient <u>newly</u> diagnosed with HIV infection?"
        ),
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
    )

    class Meta:
        abstract = True
class ClinicalReviewHtnModelMixin(models.Model):
    """Follow-up clinical review fields for hypertension: testing since
    last seen, reason for testing, and any new diagnosis as of today."""

    htn_test = models.CharField(
        verbose_name="Since last seen, was the patient tested for hypertension?",
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
        help_text=mark_safe(
            "Note: Select `not applicable` if diagnosis previously reported. <BR>"
            "`Since last seen` includes today.<BR>"
            "If `yes', complete the initial review CRF<BR>"
            "If `not applicable`, complete the review CRF."
        ),
    )
    htn_test_date = models.DateField(
        verbose_name="Date test requested",
        null=True,
        blank=True,
    )
    htn_reason = models.ManyToManyField(
        f"{settings.LIST_MODEL_APP_LABEL}.reasonsfortesting",
        related_name="htn_test_reason",
        verbose_name="Why was the patient tested for hypertension?",
        blank=True,
    )
    htn_reason_other = edc_models.OtherCharField()
    htn_dx = models.CharField(
        verbose_name=mark_safe(
            "As of today, was the patient <u>newly</u> diagnosed with hypertension?"
        ),
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
    )

    class Meta:
        abstract = True
class ClinicalReviewDmModelMixin(models.Model):
    """Follow-up clinical review fields for diabetes: testing since last
    seen, reason for testing, and any new diagnosis as of today."""

    dm_test = models.CharField(
        verbose_name="Since last seen, was the patient tested for diabetes?",
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
        help_text=mark_safe(
            "Note: Select `not applicable` if diagnosis previously reported. <BR>"
            "`Since last seen` includes today.<BR>"
            "If `yes', complete the initial review CRF<BR>"
            "If `not applicable`, complete the review CRF."
        ),
    )
    dm_test_date = models.DateField(
        verbose_name="Date test requested",
        null=True,
        blank=True,
    )
    # NOTE(review): related_name here is "dm_reason" while the hiv/htn mixins
    # use "<prefix>_test_reason" -- confirm whether this asymmetry is intended.
    dm_reason = models.ManyToManyField(
        f"{settings.LIST_MODEL_APP_LABEL}.reasonsfortesting",
        related_name="dm_reason",
        verbose_name="Why was the patient tested for diabetes?",
        blank=True,
    )
    dm_reason_other = edc_models.OtherCharField()
    dm_dx = models.CharField(
        verbose_name=mark_safe(
            "As of today, was the patient <u>newly</u> diagnosed with diabetes?"
        ),
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
    )

    class Meta:
        abstract = True
class ClinicalReviewCholModelMixin(models.Model):
    """Follow-up clinical review fields for high cholesterol: testing since
    last seen, reason for testing, and any new diagnosis as of today."""

    chol_test = models.CharField(
        verbose_name="Since last seen, was the patient tested for high cholesterol?",
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
        help_text=mark_safe(
            "Note: Select `not applicable` if diagnosis previously reported. <BR>"
            "`Since last seen` includes today.<BR>"
            "If `yes', complete the initial review CRF<BR>"
            "If `not applicable`, complete the review CRF."
        ),
    )
    chol_test_date = models.DateField(
        verbose_name="Date test requested",
        null=True,
        blank=True,
    )
    # NOTE(review): related_name here is "chol_reason" while the hiv/htn mixins
    # use "<prefix>_test_reason" -- confirm whether this asymmetry is intended.
    chol_reason = models.ManyToManyField(
        f"{settings.LIST_MODEL_APP_LABEL}.reasonsfortesting",
        related_name="chol_reason",
        verbose_name="Why was the patient tested for cholesterol?",
        blank=True,
    )
    chol_reason_other = edc_models.OtherCharField()
    chol_dx = models.CharField(
        verbose_name=mark_safe(
            "As of today, was the patient <u>newly</u> diagnosed with high cholesterol?"
        ),
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
    )

    class Meta:
        abstract = True
from django.db import models
from django.utils.html import format_html
from edc_constants.choices import YES_NO, YES_NO_NA
from edc_constants.constants import NOT_APPLICABLE
from edc_model import models as edc_models
from edc_model.models import date_not_future, estimated_date_from_ago
from edc_visit_schedule.constants import DAY1
from ...constants import CONDITION_ABBREVIATIONS
from .clinical_review import ClinicalReviewModelMixin
class ClinicalReviewBaselineError(Exception):
    """Raised when a baseline-only clinical review is saved against a
    non-baseline visit."""

    pass
class ClinicalReviewBaselineModelMixin(ClinicalReviewModelMixin):
    """Clinical review mixin restricted to the baseline visit (DAY1,
    sequence 0); derives per-condition estimated test dates on save."""

    condition_abbrev = CONDITION_ABBREVIATIONS

    def save(self, *args, **kwargs):
        # Raise unless this IS the baseline visit (visit_code == DAY1 AND
        # visit_code_sequence == 0). Fixed: the guard previously joined the
        # two negated conditions with `and`, so a visit failing only one of
        # them (e.g. DAY1/1, or WEEK2/0) slipped through despite the
        # "only valid at baseline" error message.
        if (
            self.subject_visit.visit_code != DAY1
            or self.subject_visit.visit_code_sequence != 0
        ):
            raise ClinicalReviewBaselineError(
                f"This model is only valid at baseline. Got `{self.subject_visit}`."
            )
        # Derive `<prefix>_test_estimated_date` from `<prefix>_test_ago` for
        # each condition (hiv/dm/htn/...).
        for prefix in self.condition_abbrev:
            setattr(
                self,
                f"{prefix}_test_estimated_date",
                estimated_date_from_ago(self, f"{prefix}_test_ago"),
            )
        super().save(*args, **kwargs)

    class Meta:
        abstract = True
        verbose_name = "Clinical Review: Baseline"
        verbose_name_plural = "Clinical Review: Baseline"
class ClinicalReviewBaselineHivModelMixin(models.Model):
    """Baseline clinical review fields for HIV: ever tested, when, and
    whether ever diagnosed positive."""

    hiv_test = models.CharField(
        verbose_name="Has the patient ever tested for HIV infection?",
        max_length=15,
        choices=YES_NO,
    )
    hiv_test_ago = edc_models.DurationYMDField(
        verbose_name="How long ago was the patient's most recent HIV test?",
        null=True,
        blank=True,
        help_text="If positive, most recent HIV(+) test",
    )
    hiv_test_estimated_date = models.DateField(
        null=True,
        blank=True,
        editable=False,
        help_text="calculated by the EDC using `hiv_test_ago`",
    )
    hiv_test_date = models.DateField(
        verbose_name="Date of patient's most recent HIV test?",
        validators=[edc_models.date_not_future],
        null=True,
        blank=True,
    )
    hiv_dx = models.CharField(
        verbose_name=format_html(
            "Has the patient ever tested <U>positive</U> for HIV infection?"
        ),
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
        help_text="If yes, complete form `HIV Initial Review`",
    )

    def save(self, *args, **kwargs):
        # Derive the estimated test date from the "ago" duration on every save.
        self.hiv_test_estimated_date = estimated_date_from_ago(self, "hiv_test_ago")
        super().save(*args, **kwargs)  # type: ignore

    class Meta:
        abstract = True
class ClinicalReviewBaselineHtnModelMixin(models.Model):
    """Baseline clinical review fields for hypertension: ever tested, when,
    and whether ever diagnosed."""

    htn_test = models.CharField(
        verbose_name="Has the patient ever tested for Hypertension?",
        max_length=15,
        choices=YES_NO,
    )
    htn_test_ago = edc_models.DurationYMDField(
        verbose_name="If Yes, how long ago was the patient tested for Hypertension?",
        null=True,
        blank=True,
    )
    # NOTE(review): unlike the hiv mixin, this derived field is not marked
    # editable=False -- confirm whether it should be.
    htn_test_estimated_date = models.DateField(
        null=True,
        blank=True,
        help_text="calculated by the EDC using `htn_test_ago`",
    )
    htn_test_date = models.DateField(
        verbose_name="Date of patient's most recent Hypertension test?",
        validators=[edc_models.date_not_future],
        null=True,
        blank=True,
    )
    htn_dx = models.CharField(
        verbose_name=format_html("Has the patient ever been diagnosed with Hypertension"),
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
        help_text="If yes, complete form `Hypertension Initial Review`",
    )

    def save(self, *args, **kwargs):
        # Derive the estimated test date from the "ago" duration on every save.
        self.htn_test_estimated_date = estimated_date_from_ago(self, "htn_test_ago")
        super().save(*args, **kwargs)

    class Meta:
        abstract = True
class ClinicalReviewBaselineDmModelMixin(models.Model):
    """Baseline clinical review fields for diabetes: ever tested, when,
    and whether ever diagnosed."""

    dm_test = models.CharField(
        verbose_name="Has the patient ever tested for Diabetes?",
        max_length=15,
        choices=YES_NO,
    )
    dm_test_ago = edc_models.DurationYMDField(
        verbose_name="If Yes, how long ago was the patient tested for Diabetes?",
        null=True,
        blank=True,
    )
    dm_test_estimated_date = models.DateField(
        null=True,
        blank=True,
        help_text="calculated by the EDC using `dm_test_ago`",
    )
    dm_test_date = models.DateField(
        verbose_name="Date of patient's most recent Diabetes test?",
        validators=[edc_models.date_not_future],
        null=True,
        blank=True,
    )
    dm_dx = models.CharField(
        verbose_name=format_html("Have you ever been diagnosed with Diabetes"),
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
        help_text="If yes, complete form `Diabetes Initial Review`",
    )

    def save(self, *args, **kwargs):
        # Derive the estimated test date from the "ago" duration on every save.
        self.dm_test_estimated_date = estimated_date_from_ago(self, "dm_test_ago")
        super().save(*args, **kwargs)  # type: ignore

    class Meta:
        abstract = True
class ClinicalReviewBaselineCholModelMixin(models.Model):
    """Baseline clinical review fields for high cholesterol: ever tested,
    when, and whether ever diagnosed."""

    chol_test = models.CharField(
        verbose_name="Has the patient ever tested for High Cholesterol?",
        max_length=15,
        choices=YES_NO,
    )
    chol_test_ago = edc_models.DurationYMDField(
        verbose_name="If Yes, how long ago was the patient tested for High Cholesterol?",
        null=True,
        blank=True,
    )
    chol_test_estimated_date = models.DateField(
        null=True,
        blank=True,
        help_text="calculated by the EDC using `chol_test_ago`",
    )
    chol_test_date = models.DateField(
        verbose_name="Date of patient's most recent Cholesterol test?",
        # Uses the bare `date_not_future` import; siblings use
        # `edc_models.date_not_future` (same validator, different reference).
        validators=[date_not_future],
        null=True,
        blank=True,
    )
    chol_dx = models.CharField(
        verbose_name=format_html("Have you ever been diagnosed with High Cholesterol"),
        max_length=15,
        choices=YES_NO_NA,
        default=NOT_APPLICABLE,
        help_text="If yes, complete form `High Cholesterol Initial Review`",
    )

    def save(self, *args, **kwargs):
        # Derive the estimated test date from the "ago" duration on every save.
        self.chol_test_estimated_date = estimated_date_from_ago(self, "chol_test_ago")
        super().save(*args, **kwargs)

    class Meta:
        abstract = True
# respond

`respond` is a small, lightweight wrapper around Flask's `make_response` and `jsonify`, providing a fast and convenient
way to return JSON data with the right HTTP status code.
`respond` utilizes HTTP status code descriptions as methods: you simply call a static method
such as `ok`, `not_found` or `internal_server_error` and optionally pass in the data you wish to return as JSON.
🐍 Python v3.6 +
## Installation
```shell script
pip install respond
```
## Usage
Import the `JSONResponse` class
```py3
from respond import JSONResponse
```
You can now call one of many staticmethods of the class
Return a `200 OK` status code and a list
```py3
@app.route("/")
def example():
""" Returns a list with an HTTP 200 OK status code """
return JSONResponse.ok([1, 2, 3])
```
Return a `400 BAD REQUEST` status code and a dict
```py3
@app.route("/")
def example():
""" Returns a dict with an HTTP 400 BAD REQUEST status code """
return JSONResponse.bad_request({"message": "You did something wrong"})
```
Return a `500 INTERNAL SERVER ERROR` status code
```py3
@app.route("/")
def example():
""" Returns an empty string with an HTTP 500 INTERNAL SERVER ERROR status code """
return JSONResponse.internal_server_error()
```
Passing no data to the method returns an empty string
```py3
@app.route("/")
def ok():
""" Return an empty HTTP 200 OK response """
return JSONResponse.ok()
```
You can optionally pass in a headers dict if required
```py3
@app.route("/")
def example():
""" Return a dict with custom headers """
return JSONResponse.ok(data={"message": "ok"}, headers={"X-Custom-Header": "hello!"})
```
Taking a look in the Chrome developer tools, we can see our custom header:
```shell script
Content-Length: 17
Date: Sun, 03 May 2020 16:49:41 GMT
Content-Type: application/json
Server: Werkzeug/1.0.1 Python/3.8.2
X-Custom-Header: hello!
```
`respond` has methods for all HTTP status codes defined by the ietf - https://tools.ietf.org/html/rfc7231
Common status codes include, `404 NOT FOUND`, here being used in a Flask error handler
```py3
def handle_not_found_error(e):
""" Handler for not found errors """
app.logger.warning(e)
return JSONResponse.not_found(data={"message": "Not found"})
app.register_error_handler(404, handle_not_found_error)
```
And `500 INTERNAL SERVER ERROR`
```py3
@app.route("/internal-server-error")
def internal_server_error():
msg = {"message": "Whoops, we did something wrong"}
return JSONResponse.internal_server_error(msg)
```
Visiting this URL in the browser returns
```shell script
{"message":"Whoops, we did something wrong"}
```
## Flask example
Here's a trivial example, showing `respond` in action
```py3
from flask import Flask
from respond import JSONResponse
def create_app():
app = Flask(__name__)
@app.route("/")
def ok():
""" Return an empty HTTP 200 OK response """
return JSONResponse.ok()
@app.route("/dict")
def d():
""" Return a dict """
return JSONResponse.ok({"message": "ok"})
@app.route("/with-headers")
def with_headers():
""" Return a dict with custom headers """
return JSONResponse.ok(
data={"message": "ok"},
headers={"X-Custom-Header": "hello!"}
)
@app.route("/bad-request")
def bad_request():
""" Return a 400 response with a dict """
data = {"message": "You did something wrong"}
return JSONResponse.bad_request(data=data)
@app.route("/unauthorized")
def unauthorized():
return JSONResponse.unauthorized()
@app.route("/internal-server-error")
def internal_server_error():
msg = {"message": "Whoops, we did something wrong"}
return JSONResponse.internal_server_error(msg)
@app.route("/empty-list")
def ok_empty_list():
""" Return an empty list """
return JSONResponse.ok(data=[])
@app.route("/empty-dict")
def ok_empty_dict():
""" Return an empty dict """
return JSONResponse.ok(data={})
def handle_not_found_error(e):
""" Handler for not found errors """
app.logger.warning(e)
return JSONResponse.not_found(data={"message": "Not found"})
def handle_internal_server_error(e):
""" Handler for internal server errors """
app.logger.error(e)
return JSONResponse.internal_server_error()
app.register_error_handler(404, handle_not_found_error)
app.register_error_handler(500, handle_internal_server_error)
return app
if __name__ == "__main__":
app = create_app()
app.run()
```
## Methods available
**100 range (informational)**
| method | HTTP Status code |
| ------ | ---------------- |
| `continue` | `100 `|
| `switching_protocol` | `101 `|
| `processing` | `102 `|
| `early_hints` | `103 `|
**200 range (success)**
| method | HTTP Status code |
| ------ | ---------------- |
| `ok` | `200 `|
| `created` | `201 `|
| `accepted` | `202 `|
| `non_authoritative_information` | `203 `|
| `no_content` | `204 `|
| `reset_content` | `205 `|
| `partial_content` | `206 `|
| `multi_status` | `207 `|
| `already_reported` | `208 `|
| `im_used` | `226 `|
**300 range (redirection)**
| method | HTTP Status code |
| ------ | ---------------- |
| `multiple_choice` | `300 `|
| `moved_permanently` | `301 `|
| `found` | `302 `|
| `see_other` | `303 `|
| `not_modified` | `304 `|
| `use_proxy` | `305 `|
| `unused` | `306 `|
| `temporary_redirect` | `307 `|
| `permanent_redirect` | `308 `|
**400 range (client error)**
| method | HTTP Status code |
| ------ | ---------------- |
| `bad_request` | `400 `|
| `unauthorized` | `401 `|
| `payment_required` | `402 `|
| `forbidden` | `403 `|
| `not_found` | `404 `|
| `method_not_allowed` | `405 `|
| `not_acceptable` | `406 `|
| `proxy_authentication_required` | `407 `|
| `request_timeout` | `408 `|
| `conflict` | `409 `|
| `gone` | `410 `|
| `length_required` | `411 `|
| `precondition_failed` | `412 `|
| `payload_too_large` | `413 `|
| `uri_too_long` | `414 `|
| `unsupported_media_type` | `415 `|
| `requested_range_not_satisfiable` | `416 `|
| `expectation_failed` | `417 `|
| `im_a_teapot` | `418 `|
| `misdirected_request` | `421 `|
| `unprocessable_entity` | `422 `|
| `locked` | `423 `|
| `failed_dependency` | `424 `|
| `too_early` | `425 `|
| `upgrade_required` | `426 `|
| `precondition_required` | `428 `|
| `too_many_requests` | `429 `|
| `request_header_fields_too_large` | `431 `|
| `unavailable_for_legal_reasons` | `451 `|
**500 range (server error)**
| method | HTTP Status code |
| ------ | ---------------- |
| `internal_server_error` | `500 `|
| `not_implemented` | `501 `|
| `bad_gateway` | `502 `|
| `service_unavailable` | `503 `|
| `gateway_timeout` | `504 `|
| `http_version_not_supported` | `505 `|
| `variant_also_negotiates` | `506 `|
| `insufficient_storage` | `507 `|
| `loop_detected` | `508 `|
| `not_extended` | `510 `|
| `network_authentication_required` | `511 `| | /respond-0.3.tar.gz/respond-0.3/README.md | 0.560012 | 0.911416 | README.md | pypi |
class DynamicValue:
    """Marker for response values that change between runs (ids, tokens,
    dates). Validation checks only that the value's type matches exactly."""

    def __init__(self, value_type):
        self.value_type = value_type

    def validate(self, value):
        # Strict type match: subclasses do not count (e.g. bool is not int).
        return self.value_type == type(value)
class ResponseChecker:
    """Recursively compares a testing response dict against a control sample,
    treating DynamicValue placeholders as type-only checks."""

    def __init__(self, control_sample, debug=False):
        '''
        :param control_sample: the dict which will be used as a control sample
            for comparing with the testing response.
            If the sample has dynamic values (tokens, ids, dates, etc.), just
            replace them with DynamicValue(VALUE_TYPE). Examples are below.
        :param debug: when True, mismatch details are printed via printd().
        '''
        self.control_sample = control_sample
        self.debug = debug

    def validate(self, testing_response, list_keys=None):
        '''
        Comparing the dictionaries (the DynamicValue fields check only the
        types of values).
        :param testing_response: the dict with the response of the testing
            function/request
        :param list_keys: list of keys, which we must consistently pass to get
            to the compared fragment.
            If the key is a string, the function will try the .get() method;
            else if it is an integer, it will be used as an index to get an
            element from a list.
            For example, if we need to check the first value (list) of the key
            "customers" in the dict below, the list_keys argument
            must be equal to ['orders', 'customers', 0]:
            {
                'orders': { 'id': 1,
                            'client_fullname': 'Name Lastname',
                            'time_created': '12-08-2019'
                            'customers': [{
                                'client_fullname': 'Name Lastname',
                                },
                                {
                                'client_fullname': 'Name Lastname',
                                }]
                          }
            }
            If the argument is None, comparing starts from the root keys.
        :return: Boolean value. True, if the response is valid
        '''
        # With the help of list_keys we consistently select the section of
        # data to be checked in the current method call.
        control_sample_fragment = self.control_sample
        testing_response_fragment = testing_response
        if list_keys is None:
            list_keys = []
        # Walk both structures in parallel down the path given by list_keys.
        for i in list_keys:
            if isinstance(i, str):
                control_sample_fragment = control_sample_fragment.get(i)
                testing_response_fragment = testing_response_fragment.get(i)
            elif isinstance(i, int):
                control_sample_fragment = control_sample_fragment[i]
                testing_response_fragment = testing_response_fragment[i]
            else:
                raise AttributeError('Incorrect value in list_keys')
        # We will start the verification directly:
        # If completely identical, just return True.
        # If they are lists, we sequentially compare each element, having
        #   previously numbered them.
        # If the key value in the control dictionary is equal to DynamicValue
        #   (type), we check only the data types.
        # If they are dictionaries, we check each key in sequence.
        # If none of the above work, the data is not valid.
        is_list = isinstance(testing_response_fragment, list)
        is_dict = isinstance(testing_response_fragment, dict)
        if control_sample_fragment == testing_response_fragment:
            pass
        elif is_dict and testing_response_fragment.keys() == control_sample_fragment.keys():
            # NOTE: list_keys is mutated in place and shared with the
            # recursive calls; append/del implement backtracking, so the
            # same list describes the current path at every depth.
            for i in control_sample_fragment.keys():
                list_keys.append(i)
                if not self.validate(testing_response, list_keys):
                    self.printd('invalid', testing_response, list_keys)
                    return False
                del list_keys[-1]
        elif is_list and len(testing_response_fragment) == len(control_sample_fragment):
            for index, elem in enumerate(testing_response_fragment):
                list_keys.append(index)
                if not self.validate(testing_response, list_keys):
                    self.printd('invalid', testing_response, list_keys)
                    return False
                del list_keys[-1]
        elif control_sample_fragment.__class__ == DynamicValue:
            # Placeholder in the control sample: only the type is checked.
            control_field_is_valid = control_sample_fragment.validate(testing_response_fragment)
            if not control_field_is_valid:
                self.printd('invalid type', control_sample_fragment.value_type, testing_response_fragment)
                return False
        else:
            # Values differ and no structural rule applied; the equality
            # below is necessarily False here (the first branch failed).
            self.printd('wrong values', control_sample_fragment, testing_response_fragment)
            return control_sample_fragment == testing_response_fragment
        return True

    def printd(self, *string):
        # Debug helper: prints its arguments only when debug mode is on.
        if self.debug:
            print(string)
import numpy as np
import warnings
from response_functions.common import *
# Speed of light in vacuum.
lightspeed = 299792458. #m/s
# Multiply a wavenumber given in cm^-1 by this to get angular frequency (rad/s).
cm_1 = 2*np.pi*lightspeed*1e2 # angular frequency from cm^-1
def permittivity_Geick(omega):
    """
    Epsilon of hexagonal boron nitride/epsilon_0.

    This is a two-component permittivity for in-plane electric field,
    out-of-plane electric field.

    This is based on Geick et al., 1966.

    Note that this BN is likely a fairly dirty sample with misaligned
    crystallites. It should not be used for exfoliated monocrystals of
    h-BN.

    :param omega: angular frequency in rad/s (scalar or array).
    :return: tuple ``(perp, par)`` of relative permittivities for fields
        perpendicular and parallel to the c-axis.
    """
    ## FROM GEICK (1966)
    # Each oscillator term is (strength / resonance^2) * lor(omega, w0, gamma),
    # with resonances quoted in cm^-1 and converted via cm_1.
    # NOTE(review): `lor` comes from response_functions.common and is
    # presumably a Lorentzian line shape — confirm there.
    perp = (4.95
            +(1.23e5/ 767.**2)*lor(omega, 767.*cm_1,35.*cm_1) #should be inactive
            +(3.49e6/1367.**2)*lor(omega,1367.*cm_1,29.*cm_1)
            )
    par = (4.10
           +(3.25e5/ 783.**2)*lor(omega, 783.*cm_1, 8.*cm_1)
           +(1.04e6/1510.**2)*lor(omega,1510.*cm_1,80.*cm_1) # should be inactive
           )
    return perp, par
def permittivity_Cai(omega):
    """
    Epsilon of hexagonal boron nitride/epsilon_0.

    Two-component relative permittivity, returned as a
    (in-plane field, out-of-plane field) pair.

    This is based on Cai et al., 10.1016/j.ssc.2006.10.040 .
    The oscillators carry zero damping (lossless model).
    """
    eps_inplane = 4.87 + 1.83 * lor(omega, 1372. * cm_1, 0.)
    eps_outplane = 2.95 + 0.61 * lor(omega, 746. * cm_1, 0.)
    return eps_inplane, eps_outplane
def permittivity_Cai_variable(omega,widthperp = 52.4, widthpar = 15.3):
    """
    Epsilon of hexagonal boron nitride/epsilon_0.

    This is a two-component permittivity for in-plane electric field,
    out-of-plane electric field.

    Optional parameters widthperp, widthpar are decay rates
    (in cm_1 -- WARNING: NON-CONSISTENT UNITS) to add losses to the
    Cai model (see permittivity_Cai) which does not specify losses.
    The default losses are made up.

    .. deprecated:: use permittivity_Cai_lossy instead (consistent units).
    """
    # Emits a warning on every call; kept for backward compatibility only.
    warnings.warn('permittivity_Cai_variable is deprecated - WILL BE REMOVED')
    perp = (4.87
            +1.83*lor(omega, 1372.*cm_1, widthperp*cm_1)
            )
    par = (2.95
           + 0.61*lor(omega, 746.*cm_1, widthpar*cm_1)
           )
    return perp, par
def permittivity_Cai_lossy(omega,decay_inplane=7*cm_1,decay_outplane=2*cm_1):
    """
    Epsilon of hexagonal boron nitride/epsilon_0.

    Two-component relative permittivity, returned as a
    (in-plane field, out-of-plane field) pair: the Cai model
    (see permittivity_Cai) with finite losses added.

    ``decay_inplane`` / ``decay_outplane`` are amplitude decay rates in
    s^-1; their default values are taken from permittivity_Caldwell().
    """
    eps_inplane = 4.87 + 1.83 * lor(omega, 1372. * cm_1, decay_inplane)
    eps_outplane = 2.95 + 0.61 * lor(omega, 746. * cm_1, decay_outplane)
    return eps_inplane, eps_outplane
def permittivity_Caldwell(omega):
    """
    Epsilon of hexagonal boron nitride/epsilon_0.

    This is a two-component permittivity for in-plane electric field,
    out-of-plane electric field.

    This is a "best guess" by J. Caldwell, used to produce Figure 1b in his
    paper arXiv:1404.0494.

    :param omega: angular frequency in rad/s (scalar or array).
    :return: tuple ``(perp, par)`` of relative permittivities.
    """
    perp = (4.90
            + 2.001*lor(omega, 1360.*cm_1, 7*cm_1)
            )
    par = (2.95
           + 0.5262*lor(omega, 760.*cm_1, 2*cm_1)
           )
    return perp, par
def permittivity_Caldwell_isotopic(omega, isotope = ''):
    """Dispatch to an isotope-specific Caldwell permittivity variant.

    ``isotope`` selects the model: ``'10'``, ``'11'``, ``''`` (natural
    mix) or ``'idealized'``. A numeric suffix separated by an underscore
    (e.g. ``'idealized_0.5'``) is parsed as a float and forwarded as the
    ``factor`` keyword argument.
    """
    name, *extra = isotope.split(sep='_')
    dispatch = {
        '10': _permittivity_Caldwell_10,
        '11': _permittivity_Caldwell_11,
        '': _permittivity_Caldwell_mixed,
        'idealized': _permittivity_Caldwell_idealized,
    }
    kwargs = {}
    if extra:
        kwargs["factor"] = float(extra[0])
    return dispatch[name](omega, **kwargs)
def _permittivity_Caldwell_10(omega,**kwargs):
    """Caldwell parameters for the '10' isotope variant of h-BN.

    ``**kwargs`` is accepted (and ignored) so all variants share the
    dispatcher's calling convention; returns a (perp, par) pair.
    """
    perp = (5.1
            + 2.0400 * lor(omega, 1394.5*cm_1, 1.8*cm_1)
            )
    par = (2.5
           + 0.3968 * lor(omega, 785.*cm_1, 1*cm_1)
           )
    return perp, par
def _permittivity_Caldwell_11(omega,**kwargs):
    """Caldwell parameters for the '11' isotope variant of h-BN.

    ``**kwargs`` is accepted (and ignored) so all variants share the
    dispatcher's calling convention; returns a (perp, par) pair.
    """
    perp = (5.32
            + 2.1267 * lor(omega, 1359.8*cm_1, 2.1*cm_1)
            )
    par = (3.15
           + 0.5116 * lor(omega, 755.*cm_1, 1*cm_1)
           )
    return perp, par
def _permittivity_Caldwell_mixed(omega,**kwargs):
    """Caldwell parameters for natural-abundance (mixed-isotope) h-BN.

    ``**kwargs`` is accepted (and ignored) so all variants share the
    dispatcher's calling convention; returns a (perp, par) pair.
    """
    perp = (4.90
            + 1.9049*lor(omega, 1366.2*cm_1, 7*cm_1)
            )
    par = (2.95
           + 0.5262*lor(omega, 760.*cm_1, 2*cm_1)
           )
    return perp, par
def _permittivity_Caldwell_idealized(omega, factor=1.0, **kwargs):
    """'11'-isotope Caldwell parameters with linewidths scaled by *factor*.

    ``factor`` multiplies both damping rates; ``factor=1.0`` reproduces
    the '11' variant's linewidths. The default fixes a crash: the
    dispatcher (permittivity_Caldwell_isotopic) only supplies ``factor``
    when the isotope string carries a ``_<number>`` suffix, so a bare
    ``isotope='idealized'`` previously raised TypeError. ``**kwargs`` is
    accepted (and ignored) for signature parity with the other variants.
    Returns a (perp, par) pair.
    """
    perp = (5.32
            + 2.1267 * lor(omega, 1359.8*cm_1, 2.1*cm_1*factor)
            )
    par = (3.15
           + 0.5116 * lor(omega, 755.*cm_1, 1*cm_1*factor)
           )
    return perp, par
permittivity = permittivity_Cai | /response_functions-0.4.tar.gz/response_functions-0.4/response_functions/hexagonal_boron_nitride.py | 0.752468 | 0.533033 | hexagonal_boron_nitride.py | pypi |
from pandas.core.frame import DataFrame
import preprocessor as pp
import pandas as pd
import emoji
import re
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
# Fetch the NLTK resources used below (POS tagger, tokenizer, WordNet).
# NOTE(review): these downloads run on import as a module side effect.
nltk.download('averaged_perceptron_tagger')
nltk.download('punkt')
nltk.download('wordnet')

# NOTE(review): the three lines below look like leftover demo/scratch code —
# they define a module-level `df` that nothing else here uses.
df = pd.DataFrame(["one", 'towoi', ';ldkfjs;ijf;e', 'djf;sldkfj;sodikj'])
df = df[df[0].str.len() < 10]
df.head()

# The stopwords that were used come from here (go to the google stop word section):
# https://www.ranks.nl/stopwords
stop_words = ["i","a","about","an","are","as","at","be","by","com","for","from","how","in","is","it","of","on","or","that","the","this","to","was","what","when","where","who","will","with","the","www"]
def clean_text(df, text_column, bert_cleaning=False, tweet_length=240):
    """Clean the text column in a pandas dataframe.

    Normalizes tweet-like text (URL/mention tokens, emoji tokens,
    punctuation removal, lower-casing, whitespace collapsing) and, unless
    ``bert_cleaning`` is set, also lemmatizes and removes stop words.
    Drops rows which have empty fields in the text column and duplicates
    in the text column, as well as rows longer than 280 characters.

    :param df: The dataframe to clean.
    :param text_column: Name of the column holding the text.
    :param bert_cleaning: When True, skip lemmatization and stop-word
        removal, since BERT-style models rely on full context.
    :param tweet_length: NOTE(review): currently unused — the length
        filters below are hard-coded to 280. Presumably they should use
        this parameter; confirm with callers before changing, as that
        would alter behavior (default is 240).
    :return: The cleaned dataframe.
    """
    # Remove "tweets" that are longer than 280 characters.
    # Take a copy so the column assignments below operate on an owned
    # frame rather than a slice (avoids pandas' SettingWithCopyWarning).
    df = df[df[text_column].str.len() < 280].copy()

    # Changes all url's and mentions to a token that's the same for all urls and a token that's the same for all mentions
    # This:
    # Preprocessor is #awesome 👍 https://github.com/s/preprocessor @test
    # Becomes this:
    # 'Preprocessor is #awesome 👍 $URL$ $MENTION$'
    # Go here for the documentation on pp: https://pypi.org/project/tweet-preprocessor/
    pp.set_options(pp.OPT.URL, pp.OPT.MENTION)
    df[text_column] = [pp.tokenize(text) for text in df[text_column]]

    # Removing punctuation
    # This: Won't !#$% *&^ hallo?, does this work?
    # becomes this: Wont hallo does this work
    df[text_column] = [re.sub("[!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~]", '', text) for text in df[text_column]]

    # Change emoji's into tokens too but give every emoji it's own token
    # 'Python is 👍' Becomes: 'Python is :thumbs_up:'
    # The documentation for the emoji module is here: https://pypi.org/project/emoji/
    df[text_column] = [emoji.demojize(text) for text in df[text_column]]
    df[text_column] = [re.sub(":", '', text) for text in df[text_column]]

    # Changing every upper case letter to lower case
    df[text_column] = df[text_column].str.lower()

    # Removing unneeded white spaces from the text
    df[text_column] = [re.sub('\s+', ' ', text) for text in df[text_column]]

    # If we're cleaning the data for a bert model we might want to leave in stop words and unlemmatized versions of
    # words because context is important for bert.
    if not bert_cleaning:
        # Lemmatizing the text
        # For lemmatizing we need to know what type of word a word is to lemmatize it.
        # The pos_tag function that is used to figure out what the word types are uses
        # strings to say what the word types are, but the algorithm that does the
        # lemmatization needs a different variable type so this function translates
        # between the two. (pos stands for part of speech, which is the same thing as word type)
        def get_wordnet_pos(treebank_tag):
            if treebank_tag.startswith('J'):
                return wordnet.ADJ
            elif treebank_tag.startswith('V'):
                return wordnet.VERB
            elif treebank_tag.startswith('N'):
                return wordnet.NOUN
            elif treebank_tag.startswith('R'):
                return wordnet.ADV
            else:
                # Fall back to noun, the WordNet lemmatizer's own default.
                return wordnet.NOUN

        # The object that is going to lemmatize the words
        lemmatizer = WordNetLemmatizer()

        def lemmatize_sentence(text):
            # Tagging the words with their type of word
            tagged_words = nltk.pos_tag(nltk.word_tokenize(text))
            # lemmatizing the words
            lemmatized_sentence = [
                lemmatizer.lemmatize(word[0], get_wordnet_pos(word[1])) for word in tagged_words
            ]
            return ' '.join(lemmatized_sentence)

        df[text_column] = [lemmatize_sentence(text) for text in df[text_column]]

        # removing stop words
        def remove_stop_words(text):
            tokenized_sentence = nltk.word_tokenize(text)
            tokenized_sentence = ["" if token in stop_words else token for token in tokenized_sentence]
            return ' '.join(tokenized_sentence)

        df[text_column] = [remove_stop_words(text) for text in df[text_column]]

    # dropping empty rows and duplicate rows (only looking at the text column)
    df = df.dropna(subset=[text_column]).drop_duplicates(subset=[text_column])
    # Re-apply the length filter: demojizing can lengthen a row past 280.
    df = df[df[text_column].str.len() < 280]
    return df
import json
import pickle
import warnings
from pathlib import Path
from typing import Any, List, Optional
import numpy as np
import pandas as pd
import shap
from raiutils.data_processing import convert_to_list
from responsibleai._interfaces import (FeatureImportance, ModelExplanationData,
PrecomputedExplanations,
TextFeatureImportance)
from responsibleai._internal.constants import ExplainerManagerKeys as Keys
from responsibleai._internal.constants import (ListProperties, ManagerNames,
Metadata)
from responsibleai._tools.shared.state_directory_management import \
DirectoryManager
from responsibleai.exceptions import UserConfigValidationException
from responsibleai.managers.base_manager import BaseManager
from responsibleai_text.common.constants import (ModelTask,
QuestionAnsweringFields,
Tokens)
from responsibleai_text.utils.question_answering import QAPredictor
# Column names used for question-answering datasets.
CONTEXT = QuestionAnsweringFields.CONTEXT
QUESTIONS = QuestionAnsweringFields.QUESTIONS
# Separator token joining a question and its context into one string.
SEP = Tokens.SEP

# Feature-count threshold constant for sparse handling.
SPARSE_NUM_FEATURES_THRESHOLD = 1000

# Keys/attribute names used when (de)serializing manager state.
IS_RUN = 'is_run'
IS_ADDED = 'is_added'
CLASSES = 'classes'
U_EVALUATION_EXAMPLES = '_evaluation_examples'
FEATURES = 'features'
META_JSON = Metadata.META_JSON
MODEL = Metadata.MODEL
EXPLANATION = '_explanation'
TASK_TYPE = '_task_type'
class ExplainerManager(BaseManager):
    """Defines the ExplainerManager for explaining a text-based model."""

    def __init__(self, model: Any, evaluation_examples: pd.DataFrame,
                 target_column: str,
                 task_type: str,
                 classes: Optional[List] = None):
        """Creates an ExplainerManager object.

        :param model: The model to explain.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param evaluation_examples: A matrix of feature vector
            examples (# examples x # features) on which to explain the
            model's output, with an additional label column.
        :type evaluation_examples: pandas.DataFrame
        :param target_column: The name of the label column or list of columns.
            This is a list of columns for multilabel models.
        :type target_column: str or list[str]
        :param task_type: The task to run.
        :type task_type: str
        :param classes: Class names as a list of strings.
            The order of the class names should match that of the model
            output. Only required if explaining classifier.
        :type classes: list
        """
        self._model = model
        self._target_column = target_column
        if not isinstance(target_column, list):
            target_column = [target_column]
        # Keep only the feature columns; labels are not needed to explain.
        self._evaluation_examples = \
            evaluation_examples.drop(columns=target_column)
        self._is_run = False
        self._is_added = False
        self._features = list(self._evaluation_examples.columns)
        self._classes = classes
        self._explanation = None
        self._task_type = task_type

    def add(self):
        """Add an explainer to be computed later."""
        if self._model is None:
            raise UserConfigValidationException(
                'Model is required for model explanations')
        if self._is_added:
            warnings.warn(("DUPLICATE-EXPLAINER-CONFIG: Ignoring. "
                           "Explanation has already been added, "
                           "currently limited to one explainer type."),
                          UserWarning)
            return
        self._is_added = True

    def compute(self):
        """Creates an explanation by running the explainer on the model."""
        # No-op unless add() has been called and compute() hasn't run yet.
        if not self._is_added:
            return
        if self._is_run:
            return
        if self._is_classification_task:
            if hasattr(self._model, 'predict_proba'):
                # use model-agnostic simple tokenizer
                masker = shap.maskers.Text()
                explainer = shap.Explainer(self._model.predict_proba,
                                           masker)
            else:
                explainer = shap.Explainer(self._model)
            # Text classification expects the text in the first column.
            eval_examples = self._evaluation_examples.iloc[:, 0].tolist()
            self._explanation = explainer(eval_examples)
        elif self._task_type == ModelTask.QUESTION_ANSWERING:
            qa_predictor = QAPredictor(self._model)
            qa_start = qa_predictor.predict_qa_start
            # shap reads output_names from the function object, so attach
            # it to the unbound function via __func__.
            qa_start.__func__.output_names = qa_predictor.output_names
            explainer = shap.Explainer(qa_start, self._model.tokenizer)
            context = self._evaluation_examples[CONTEXT]
            questions = self._evaluation_examples[QUESTIONS]
            # Join each question with its context as "question<SEP>context".
            eval_examples = []
            for context, question in zip(context, questions):
                eval_examples.append(question + SEP + context)
            self._explanation = explainer(eval_examples)
        else:
            raise ValueError("Unknown task type: {}".format(self._task_type))
        self._is_run = True

    def get(self):
        """Get the computed explanation.

        Must be called after add and compute methods.

        :return: The computed explanations.
        :rtype:
            list[interpret_community.explanation.explanation.BaseExplanation]
        """
        if self._explanation:
            return [self._explanation]
        else:
            return []

    def list(self):
        """List information about the ExplainerManager.

        :return: A dictionary of properties.
        :rtype: dict
        """
        props = {ListProperties.MANAGER_TYPE: self.name}
        if self._explanation:
            props[Keys.IS_COMPUTED] = True
        else:
            props[Keys.IS_COMPUTED] = False
        return props

    def get_data(self):
        """Get explanation data

        :return: An array of ModelExplanationData.
        :rtype: List[ModelExplanationData]
        """
        return [self._get_interpret(i) for i in self.get()]

    @property
    def _is_multilabel_task(self):
        """Check if the task is a multilabel classification task.

        :return: True if the task is a multilabel classification task.
        :rtype: bool
        """
        return self._task_type == ModelTask.MULTILABEL_TEXT_CLASSIFICATION

    @property
    def _is_classification_task(self):
        """Check if the task is a classification task.

        :return: True if the task is a classification task.
        :rtype: bool
        """
        is_onelabel_task = self._task_type == ModelTask.TEXT_CLASSIFICATION
        is_multilabel_task = self._is_multilabel_task
        return is_onelabel_task or is_multilabel_task

    def _get_interpret(self, explanation):
        """Convert a shap explanation into ModelExplanationData.

        :param explanation: The computed shap explanation.
        :type explanation: shap.Explanation
        :return: The converted explanation data.
        :rtype: ModelExplanationData
        """
        interpretation = ModelExplanationData()
        try:
            importances = FeatureImportance()
            features, scores, intercept = self._compute_global_importances(
                explanation)
            importances.featureNames = features
            importances.scores = scores
            importances.intercept = intercept
            text_feature_importances = self._compute_text_feature_importances(
                explanation)
            precomputedExplanations = PrecomputedExplanations()
            precomputedExplanations.globalFeatureImportance = importances
            precomputedExplanations.textFeatureImportance = \
                text_feature_importances
            interpretation.precomputedExplanations = precomputedExplanations
        except Exception as ex:
            # Chained so the original failure remains inspectable.
            raise ValueError(
                "Unsupported explanation type") from ex
        return interpretation

    def _compute_global_importances(self, explanation):
        """Compute global feature importances.

        :param explanation: The explanation.
        :type explanation: shap.Explanation
        :return: The feature names, scores, and intercept.
        :rtype: tuple[list[str], list[float], float]
        """
        is_classif_task = self._is_classification_task
        if is_classif_task:
            # Average per-token shap values over all examples.
            global_exp = explanation[:, :, :].mean(0)
            features = convert_to_list(global_exp.feature_names)
            scores = convert_to_list(np.abs(global_exp.values).mean(1))
            intercept = global_exp.base_values.mean(0)
        elif self._task_type == ModelTask.QUESTION_ANSWERING:
            flattened_features = explanation._flatten_feature_names()
            scores = []
            features = []
            for key in flattened_features.keys():
                features.append(key)
                # Mean absolute importance per token, then averaged per key.
                token_importances = []
                for importances in flattened_features[key]:
                    token_importances.append(np.mean(np.abs(importances)))
                scores.append(np.mean(token_importances))
            base_values = [
                base_values.mean()
                for base_values in explanation.base_values]
            intercept = sum(base_values) / len(base_values)
        else:
            raise ValueError("Unknown task type: {}".format(self._task_type))
        return features, scores, intercept

    def _compute_text_feature_importances(self, explanation):
        """Compute the text feature importances.

        :param explanation: The explanation.
        :type explanation: shap.Explanation
        :return: The text importances and corresponding tokens.
        :rtype: tuple[list[str], list[float], float]
        """
        text_feature_importances = []
        is_classif_task = self._is_classification_task
        for instance in explanation:
            text_feature_importance = TextFeatureImportance()
            if is_classif_task:
                text_feature_importance.localExplanations = \
                    instance.values.tolist()
                text_feature_importance.text = instance.data
            elif self._task_type == ModelTask.QUESTION_ANSWERING:
                # TODO: This is a bit more complicated, as it's
                # a map of importances for each token from question
                # to answer and the other way around.
                continue
            else:
                raise ValueError("Unknown task type: {}".format(
                    self._task_type))
            text_feature_importances.append(text_feature_importance)
        return text_feature_importances

    @property
    def name(self):
        """Get the name of the explainer manager.

        :return: The name of the explainer manager.
        :rtype: str
        """
        return ManagerNames.EXPLAINER

    def _save(self, path):
        """Save the ExplainerManager to the given path.

        :param path: The directory path to save the ExplainerManager to.
        :type path: str
        """
        top_dir = Path(path)
        top_dir.mkdir(parents=True, exist_ok=True)
        if self._is_added:
            directory_manager = DirectoryManager(parent_directory_path=path)
            data_directory = directory_manager.create_data_directory()
            # save the explanation
            if self._explanation:
                with open(data_directory / ManagerNames.EXPLAINER, 'wb') as f:
                    pickle.dump(self._explanation, f)
            meta = {IS_RUN: self._is_run,
                    IS_ADDED: self._is_added}
            with open(data_directory / META_JSON, 'w') as file:
                json.dump(meta, file)

    @staticmethod
    def _load(path, rai_insights):
        """Load the ExplainerManager from the given path.

        :param path: The directory path to load the ExplainerManager from.
        :type path: str
        :param rai_insights: The loaded parent RAIInsights.
        :type rai_insights: RAIInsights
        :return: The ExplainerManager manager after loading.
        :rtype: ExplainerManager
        """
        # create the ExplainerManager without any properties using the __new__
        # function, similar to pickle
        inst = ExplainerManager.__new__(ExplainerManager)
        all_cf_dirs = DirectoryManager.list_sub_directories(path)
        if len(all_cf_dirs) != 0:
            directory_manager = DirectoryManager(
                parent_directory_path=path,
                sub_directory_name=all_cf_dirs[0])
            data_directory = directory_manager.get_data_directory()
            with open(data_directory / META_JSON, 'r') as meta_file:
                meta = meta_file.read()
            meta = json.loads(meta)
            inst.__dict__['_' + IS_RUN] = meta[IS_RUN]
            inst.__dict__['_' + IS_ADDED] = meta[IS_ADDED]
            inst.__dict__[EXPLANATION] = None
            explanation_path = data_directory / ManagerNames.EXPLAINER
            if explanation_path.exists():
                with open(explanation_path, 'rb') as f:
                    explanation = pickle.load(f)
                inst.__dict__[EXPLANATION] = explanation
        else:
            # No saved state found: reconstruct an empty manager.
            inst.__dict__['_' + IS_RUN] = False
            inst.__dict__['_' + IS_ADDED] = False
            inst.__dict__[EXPLANATION] = None
        inst.__dict__['_' + MODEL] = rai_insights.model
        inst.__dict__['_' + CLASSES] = rai_insights._classes
        target_column = rai_insights.target_column
        if not isinstance(target_column, list):
            target_column = [target_column]
        test = rai_insights.test.drop(columns=target_column)
        inst.__dict__[U_EVALUATION_EXAMPLES] = test
        inst.__dict__['_' + FEATURES] = list(test.columns)
        inst.__dict__[TASK_TYPE] = rai_insights.task_type
        return inst
import json
from typing import Any, List, Optional, Union
import jsonschema
import numpy as np
import pandas as pd
from ml_wrappers import wrap_model
from erroranalysis._internal.error_analyzer import ModelAnalyzer
from erroranalysis._internal.error_report import as_error_report
from responsibleai._tools.shared.state_directory_management import \
DirectoryManager
from responsibleai.managers.error_analysis_manager import \
ErrorAnalysisManager as BaseErrorAnalysisManager
from responsibleai.managers.error_analysis_manager import as_error_config
from responsibleai_text.common.constants import ModelTask
from responsibleai_text.utils.feature_extractors import get_text_columns
LABELS = 'labels'
def _concat_labels_column(dataset, target_column, classes):
"""Concatenate labels column for multilabel models.
:param dataset: The dataset including the label column.
:type dataset: pandas.DataFrame
:param target_column: The list of label columns in multilabel task.
:type target_column: list[str]
:param classes: The list of labels in multilabel task.
:type classes: list
:return: The labels column concatenated.
:rtype: list
"""
labels = []
for _, row in dataset[target_column].iterrows():
row_idxs = range(len(row))
pred_classes = [classes[i] for i in row_idxs if row[i]]
labels.append(','.join(pred_classes))
return labels
class WrappedIndexPredictorModel:
    """Wraps model that uses index to retrieve text data for making
    predictions."""

    def __init__(self, model, dataset, is_multilabel, task_type, classes=None):
        """Initialize the WrappedIndexPredictorModel.

        :param model: The model to wrap.
        :type model: object
        :param dataset: The dataset to use for making predictions.
        :type dataset: pandas.DataFrame
        :param is_multilabel: Whether the model is multilabel.
        :type is_multilabel: bool
        :param task_type: The task to run.
        :type task_type: str
        :param classes: The classes for the model.
        :type classes: list
        """
        self.model = model
        self.dataset = dataset
        self.classes = classes
        self.is_multilabel = is_multilabel
        self.task_type = task_type
        # Cached class probabilities; stays None for tasks (e.g. question
        # answering) where the wrapped model exposes no predict_proba.
        self._pred_proba = None
        classif_tasks = [ModelTask.TEXT_CLASSIFICATION,
                         ModelTask.MULTILABEL_TEXT_CLASSIFICATION]
        if self.task_type in classif_tasks:
            # Classification expects the text in the first column.
            text_data = self.dataset.iloc[:, 0].tolist()
            self.predictions = self.model.predict(text_data)
            # BUGFIX: these probabilities were previously assigned to
            # self.predict_proba, which shadowed the predict_proba method
            # below and made it uncallable on instances.
            self._pred_proba = self.model.predict_proba(text_data)
        elif self.task_type == ModelTask.QUESTION_ANSWERING:
            self.predictions = self.model.predict(
                self.dataset.loc[:, ['context', 'questions']])
            self.predictions = np.array(self.predictions)
        else:
            raise ValueError("Unknown task type: {}".format(self.task_type))
        if self.is_multilabel:
            predictions_joined = []
            for row in self.predictions:
                # get all labels where prediction is 1
                pred_labels = [i for i in range(len(row)) if row[i]]
                if self.classes is not None:
                    pred_labels = [self.classes[i] for i in pred_labels]
                else:
                    pred_labels = [str(i) for i in pred_labels]
                # concatenate all predicted labels into a single string
                predictions_joined.append(','.join(pred_labels))
            self.predictions = np.array(predictions_joined)

    def predict(self, X):
        """Predict the class labels for the provided data.

        :param X: Data to predict the labels for.
        :type X: pandas.DataFrame
        :return: Predicted class labels.
        :rtype: list
        """
        # X carries only the index; the cached predictions are looked up.
        index = X.index
        predictions = self.predictions[index]
        if self.task_type == ModelTask.MULTILABEL_TEXT_CLASSIFICATION:
            # Multilabel predictions are already joined label strings.
            return predictions
        if self.classes is not None:
            predictions = [self.classes[y] for y in predictions]
        return predictions

    def predict_proba(self, X):
        """Predict the class probabilities for the provided data.

        :param X: Data to predict the probabilities for.
        :type X: pandas.DataFrame
        :return: Predicted class probabilities.
        :rtype: list[list]
        :raises ValueError: If the task type has no cached probabilities.
        """
        if self._pred_proba is None:
            raise ValueError(
                "predict_proba is not supported for task type: {}".format(
                    self.task_type))
        index = X.index
        return self._pred_proba[index]
class ErrorAnalysisManager(BaseErrorAnalysisManager):
    """Defines a wrapper class of Error Analysis for text scenario."""

    def __init__(self, model: Any, dataset: pd.DataFrame,
                 ext_dataset: pd.DataFrame, target_column: str,
                 text_column: Optional[Union[str, List]],
                 task_type: str, classes: Optional[List] = None,
                 categorical_features: Optional[List[str]] = None):
        """Creates an ErrorAnalysisManager object.

        :param model: The model to analyze errors on.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param dataset: The dataset including the label column.
        :type dataset: pandas.DataFrame
        :param ext_dataset: The dataset of extracted features including the
            label column.
        :type ext_dataset: pandas.DataFrame
        :param target_column: The name of the label column or list of columns.
            This is a list of columns for multilabel models.
        :type target_column: str or list[str]
        :param text_column: The name of the text column or list of columns.
            This is a list of columns for question answering models.
        :type text_column: str or list[str]
        :param task_type: The task to run.
        :type task_type: str
        :param classes: Class names as a list of strings.
            The order of the class names should match that of the model
            output. Only required if analyzing a classifier.
        :type classes: list
        :param categorical_features: The categorical feature names.
        :type categorical_features: list[str]
        """
        is_multilabel = False
        index_classes = classes
        if isinstance(target_column, list):
            # Multilabel: collapse the indicator columns into a single
            # synthetic LABELS column of comma-joined class names.
            # create copy of dataset as we will make modifications to it
            dataset = dataset.copy()
            index_classes = target_column
            labels = _concat_labels_column(dataset, target_column,
                                           index_classes)
            dataset[LABELS] = labels
            ext_dataset[LABELS] = dataset[LABELS]
            dataset.drop(columns=target_column, inplace=True)
            ext_dataset.drop(columns=target_column, inplace=True)
            target_column = LABELS
            is_multilabel = True
        index_predictor = ErrorAnalysisManager._create_index_predictor(
            model, dataset, target_column, text_column, is_multilabel,
            task_type, index_classes)
        if categorical_features is None:
            categorical_features = []
        super(ErrorAnalysisManager, self).__init__(
            index_predictor, ext_dataset, target_column,
            classes, categorical_features)

    @staticmethod
    def _create_index_predictor(model, dataset, target_column,
                                text_column, is_multilabel,
                                task_type, classes=None):
        """Creates a wrapped predictor that uses index to retrieve text data.

        :param model: The model to analyze errors on.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param dataset: The dataset including the label column.
        :type dataset: pandas.DataFrame
        :param target_column: The name of the label column or list of columns.
            This is a list of columns for multilabel models.
        :type target_column: str or list[str]
        :param text_column: The name of the text column or list of columns.
            This is a list of columns for question answering models.
        :type text_column: str or list[str]
        :param is_multilabel: Whether the model is multilabel.
        :type is_multilabel: bool
        :param task_type: The task to run.
        :type task_type: str
        :param classes: Class names as a list of strings.
            The order of the class names should match that of the model
            output.
        :type classes: list
        :return: A wrapped predictor that uses index to retrieve text data.
        :rtype: WrappedIndexPredictorModel
        """
        # Strip labels and non-text columns before wrapping the model.
        dataset = dataset.drop(columns=[target_column])
        dataset = get_text_columns(dataset, text_column)
        index_predictor = WrappedIndexPredictorModel(
            model, dataset, is_multilabel, task_type, classes)
        return index_predictor

    @staticmethod
    def _load(path, rai_insights):
        """Load the ErrorAnalysisManager from the given path.

        :param path: The directory path to load the ErrorAnalysisManager from.
        :type path: str
        :param rai_insights: The loaded parent RAIInsights.
        :type rai_insights: RAIInsights
        :return: The ErrorAnalysisManager manager after loading.
        :rtype: ErrorAnalysisManager
        """
        # create the ErrorAnalysisManager without any properties using
        # the __new__ function, similar to pickle
        inst = ErrorAnalysisManager.__new__(ErrorAnalysisManager)
        ea_config_list = []
        ea_report_list = []
        all_ea_dirs = DirectoryManager.list_sub_directories(path)
        for ea_dir in all_ea_dirs:
            directory_manager = DirectoryManager(
                parent_directory_path=path,
                sub_directory_name=ea_dir)
            config_path = (directory_manager.get_config_directory() /
                           'config.json')
            with open(config_path, 'r') as file:
                ea_config = json.load(file, object_hook=as_error_config)
            ea_config_list.append(ea_config)
            report_path = (directory_manager.get_data_directory() /
                           'report.json')
            with open(report_path, 'r') as file:
                ea_report = json.load(file, object_hook=as_error_report)
            # Validate the serialized output against schema
            schema = ErrorAnalysisManager._get_error_analysis_schema()
            jsonschema.validate(
                json.loads(ea_report.to_json()), schema)
            ea_report_list.append(ea_report)
        inst.__dict__['_ea_report_list'] = ea_report_list
        inst.__dict__['_ea_config_list'] = ea_config_list
        feature_metadata = rai_insights._feature_metadata
        categorical_features = feature_metadata.categorical_features
        inst.__dict__['_categorical_features'] = categorical_features
        target_column = rai_insights.target_column
        true_y = rai_insights._ext_test_df[target_column]
        if isinstance(target_column, list):
            dropped_cols = target_column
        else:
            dropped_cols = [target_column]
        dataset = rai_insights._ext_test_df.drop(columns=dropped_cols)
        inst.__dict__['_dataset'] = dataset
        feature_names = list(dataset.columns)
        inst.__dict__['_feature_names'] = feature_names
        wrapped_model = wrap_model(rai_insights.model, dataset,
                                   rai_insights.task_type)
        is_multilabel = False
        index_classes = rai_insights._classes
        index_dataset = rai_insights.test
        if isinstance(target_column, list):
            # Multilabel: rebuild the synthetic LABELS column exactly as
            # __init__ does, so the reloaded analyzer sees the same data.
            index_dataset = index_dataset.copy()
            index_classes = target_column
            labels = _concat_labels_column(index_dataset, target_column,
                                           index_classes)
            index_dataset.drop(columns=target_column, inplace=True)
            index_dataset[LABELS] = labels
            target_column = LABELS
            is_multilabel = True
            true_y = index_dataset[target_column]
        inst.__dict__['_true_y'] = true_y
        inst.__dict__['_task_type'] = rai_insights.task_type
        text_column = rai_insights._text_column
        index_predictor = ErrorAnalysisManager._create_index_predictor(
            wrapped_model, index_dataset, target_column, text_column,
            is_multilabel, rai_insights.task_type, index_classes)
        inst.__dict__['_analyzer'] = ModelAnalyzer(index_predictor,
                                                   dataset,
                                                   true_y,
                                                   feature_names,
                                                   categorical_features)
        return inst
import logging
from responsibleai_text.common.constants import Tokens
# Module-level logger for the question answering predictor helpers.
module_logger = logging.getLogger(__name__)
module_logger.setLevel(logging.INFO)

# torch is an optional dependency: it is only needed when the wrapped
# question-answering model is a pytorch model, so an import failure is
# logged at debug level instead of being raised.
try:
    import torch
except ImportError:
    module_logger.debug(
        'Could not import torch, required if using a pytorch model')

# Separator token used to join and split question/context strings.
SEP = Tokens.SEP
class QAPredictor:
    """Wraps a question answering model with logit-level prediction
    helpers used by explainers."""

    def __init__(self, qa_model):
        """Initialize the Question Answering predictor.

        :param qa_model: The question answering model.
        :type qa_model: QuestionAnsweringModel
        """
        self._qa_model = qa_model

    def predict_qa(self, questions, start):
        """Define predictions outputting the logits for range start and end.

        :param questions: The questions and context to predict on.
        :type questions: list[str]
        :param start: Whether to predict the start of the range.
        :type start: bool
        :return: The logits for the start and end of the range.
        :rtype: list[list[float]]
        """
        tokenizer = self._qa_model.tokenizer
        model = self._qa_model.model
        results = []
        for combined in questions:
            # Each input is a single "question SEP context" string.
            question, context = combined.split(SEP)
            encoded = tokenizer(question, context)
            # Add a batch dimension of one to every encoded field.
            tensors = {name: torch.tensor(encoded[name]).reshape(1, -1)
                       for name in encoded}
            output = model.forward(**tensors)
            if start:
                logits = output.start_logits
            else:
                logits = output.end_logits
            results.append(logits.reshape(-1).detach().numpy())
        return results

    def predict_qa_start(self, questions):
        """Define predictions outputting the logits for the start of the range.

        :param questions: The questions and context to predict on.
        :type questions: list[str]
        :return: The logits for the start of the range.
        :rtype: list[list[float]]
        """
        return self.predict_qa(questions, True)

    def output_names(self, inputs):
        """Define the output names as tokens.

        :param inputs: The inputs to the model.
        :type inputs: list[str]
        :return: The output names as the decoded tokens.
        :rtype: list[str]
        """
        question, context = inputs.split(SEP)
        encoded = self._qa_model.tokenizer(question, context)
        decode = self._qa_model.tokenizer.decode
        return [decode([token_id]) for token_id in encoded["input_ids"]]
import re
from typing import List, Optional, Union
import pandas as pd
import spacy
from negspacy.termsets import termset
from tqdm import tqdm
from nlp_feature_extractors import attribute_extractors as exts
from responsibleai_text.common.constants import (ModelTask,
QuestionAnsweringFields)
nlp = None
def extract_features(text_dataset: pd.DataFrame,
                     target_column: Union[str, List], task_type: str,
                     dropped_features: Optional[List[str]] = None):
    '''Extract tabular data features from the text dataset.

    :param text_dataset: A pandas dataframe containing the text data.
    :type text_dataset: pandas.DataFrame
    :param target_column: The name of the label column or list of columns.
        This is a list of columns for multilabel models.
    :type target_column: str or list[str]
    :param task_type: The type of task to be performed.
    :type task_type: str
    :param dropped_features: The list of features to be dropped.
    :type dropped_features: list[str]
    :return: The list of extracted features and the feature names.
    :rtype: list, list
    '''
    results = []
    # Features computed per text column by
    # add_extracted_features_for_sentence below.
    base_feature_names = ["positive_words", "negative_words",
                          "negation_words", "negated_entities",
                          "named_persons", "sentence_length"]
    single_text_col_tasks = [ModelTask.TEXT_CLASSIFICATION,
                             ModelTask.MULTILABEL_TEXT_CLASSIFICATION]
    has_dropped_features = dropped_features is not None
    # Positional index of the first metadata column: one text column plus
    # one label column for single-label tasks.
    start_meta_index = 2
    column_names = text_dataset.columns
    if isinstance(target_column, list):
        # Multilabel: one text column plus one column per label.
        start_meta_index = len(target_column) + 1
    if task_type in single_text_col_tasks:
        feature_names = base_feature_names
    elif task_type == ModelTask.QUESTION_ANSWERING:
        # QA datasets have two text columns (context and question), so
        # metadata starts one column later and every base feature is
        # emitted once per text column.
        start_meta_index += 1
        feature_names = []
        prefixes = [QuestionAnsweringFields.CONTEXT + "_",
                    QuestionAnsweringFields.QUESTION + "_"]
        for prefix in prefixes:
            for feature_name in base_feature_names:
                feature_names.append(prefix + feature_name)
            # QA additionally gets parse-tree-depth features per column.
            feature_names.append(prefix + "average_parse_tree_depth")
            feature_names.append(prefix + "maximum_parse_tree_depth")
        feature_names.append("question_type")
        feature_names.append("context_overlap")
    else:
        raise ValueError("Unknown task type: {}".format(task_type))
    # copy over the metadata column names
    for j in range(start_meta_index, text_dataset.shape[1]):
        if has_dropped_features and column_names[j] in dropped_features:
            continue
        feature_names.append(column_names[j])
    if not isinstance(target_column, list):
        target_column = [target_column]
    text_features = text_dataset.drop(target_column, axis=1)
    if task_type in single_text_col_tasks:
        sentences = text_features.iloc[:, 0].tolist()
        for i, sentence in tqdm(enumerate(sentences)):
            extracted_features = []
            add_extracted_features_for_sentence(sentence, extracted_features)
            # append all other metadata features
            append_metadata_values(start_meta_index, text_dataset, i,
                                   extracted_features, has_dropped_features,
                                   dropped_features, column_names)
            results.append(extracted_features)
    elif task_type == ModelTask.QUESTION_ANSWERING:
        # NOTE(review): iterrows yields the dataframe's label index, which
        # append_metadata_values then uses positionally via iloc; this
        # assumes text_dataset has a default RangeIndex — TODO confirm.
        for i, row in tqdm(text_features.iterrows()):
            extracted_features = []
            add_extracted_features_for_sentence(
                row[QuestionAnsweringFields.CONTEXT], extracted_features,
                task_type)
            add_extracted_features_for_sentence(
                row[QuestionAnsweringFields.QUESTIONS], extracted_features,
                task_type, sentence_type="QUESTION")
            context = row[QuestionAnsweringFields.CONTEXT]
            question = row[QuestionAnsweringFields.QUESTIONS]
            context_overlap = get_context_overlap(context=context,
                                                 question=question)
            extracted_features.append(context_overlap)
            # append all other metadata features
            append_metadata_values(start_meta_index, text_dataset, i,
                                   extracted_features, has_dropped_features,
                                   dropped_features, column_names)
            results.append(extracted_features)
    else:
        raise ValueError("Unknown task type: {}".format(task_type))
    return results, feature_names
def append_metadata_values(start_meta_index, text_dataset, i,
                           extracted_features, has_dropped_features,
                           dropped_features, column_names):
    """Append the metadata values to the extracted features.

    Note this also modifies the input array in-place.

    :param start_meta_index: The index of the first metadata column.
    :type start_meta_index: int
    :param text_dataset: The text dataset.
    :type text_dataset: pandas.DataFrame
    :param i: The index of the current row.
    :type i: int
    :param extracted_features: The list of extracted features.
    :type extracted_features: list
    :param has_dropped_features: Whether there are dropped features.
    :type has_dropped_features: bool
    :param dropped_features: The list of dropped features.
    :type dropped_features: list
    :param column_names: The list of column names.
    :type column_names: list
    :return: The list of extracted features.
    :rtype: list
    """
    # append all other metadata features
    for j in range(start_meta_index, text_dataset.shape[1]):
        if has_dropped_features and column_names[j] in dropped_features:
            continue
        # Single positional lookup instead of the previous chained
        # text_dataset.iloc[i][j]: an integer key on the label-indexed
        # row Series relied on deprecated positional fallback behavior
        # (removed in modern pandas). .iloc[i, j] is purely positional,
        # matching the positional loop over column indices above.
        extracted_features.append(text_dataset.iloc[i, j])
    return extracted_features
def get_text_columns(text_dataset: pd.DataFrame,
                     text_column: Optional[Union[str, List]]):
    """Get the text columns for prediction.

    Drops any metadata columns so only the text column(s) are passed on
    to the model.

    :param text_dataset: The text dataset.
    :type text_dataset: pd.DataFrame
    :param text_column: The name of the text column or list of columns.
    :type text_column: str or list[str]
    :return: The text columns for prediction.
    :rtype: pd.DataFrame
    """
    if not text_column:
        # No text column specified; nothing to drop.
        return text_dataset
    if isinstance(text_column, list):
        selected = text_column
    else:
        selected = [text_column]
    # Drop metadata columns before calling predict
    if len(text_dataset.columns) > len(selected):
        text_dataset = text_dataset[selected]
    return text_dataset
def add_extracted_features_for_sentence(sentence, extracted_features,
                                        task_type=None, sentence_type=None):
    """Add the extracted features for a sentence.

    Note this also modifies the input array in-place.

    :param sentence: The sentence to extract features from.
    :type sentence: str
    :param extracted_features: The list of extracted features.
    :type extracted_features: list
    :param task_type: The type of task to be performed.
    :type task_type: str
    :param sentence_type: The type of sentence to be processed.
    :type sentence_type: str
    """
    global nlp
    if nlp is None:
        # Build the shared spacy pipeline once, with negation detection
        # added via negspacy.
        nlp = spacy.load("en_core_web_sm")
        patterns = termset("en").get_patterns()
        nlp.add_pipe("negex", config={"neg_termset": patterns})
    doc = nlp(sentence)
    pos_neg = exts.positive_negative_word_count(doc)
    persons = exts.get_named_persons(doc)
    negations = exts.detect_negation_words_and_entities(doc)
    features = [pos_neg["positive_word_count"],
                pos_neg["negative_word_count"],
                negations["negation_words"],
                negations["negated_entities"],
                len(persons),
                len(sentence)]
    if task_type == ModelTask.QUESTION_ANSWERING:
        # QA also gets parse-tree-depth features, and questions get a
        # coarse question-type label.
        features.append(get_average_depth(doc))
        features.append(get_max_depth(doc))
        if sentence_type == 'QUESTION':
            features.append(get_question_type(sentence))
    # TODO: This extractor seems to be very slow:
    # mf_count = exts.get_male_female_words_count(doc)
    extracted_features.extend(features)
def get_question_type(qtext):
    """Get the question type.

    Classifies a question into one of the coarse categories YES/NO,
    WHAT, WHO, WHY, WHERE, WHEN, NUMBER, HOW or OTHER based on its
    leading words.

    :param qtext: The question text.
    :type qtext: str
    :return: The question type.
    :rtype: str
    """
    if re.search(r'\b\A(can|could|will|would|have|has' +
                 r'|do|does|did|is|are|was|may|might)\s', qtext, re.I):
        return "YES/NO"
    # Search once and reuse the match instead of running the identical
    # pattern a second time for group(3).
    what_match = re.search(r'\b\A(what|which)(\'s|\'re)?\s+(\w+)',
                           qtext, re.I)
    if what_match:
        nextword = what_match.group(3)
        if nextword in ["year", "month", "date", "day"]:
            return "WHEN"
        return "WHAT"
    if re.search(r'\bwho(\'s|\'re)?\s', qtext, re.I):
        return "WHO"
    if re.search(r'\bwhy(\'s|\'re)?\s', qtext, re.I):
        return "WHY"
    if re.search(r'\bwhere(\'s|\'re)?\s', qtext, re.I):
        return "WHERE"
    if re.search(r'\bhow(\'s|\'re)?\s', qtext, re.I):
        # The inner pattern additionally requires a word after "how";
        # guard against a missing match (e.g. "How ?"), which previously
        # raised AttributeError on .group(3).
        how_match = re.search(r'\b(how)(\'s|\'re)?\s(\w+)', qtext, re.I)
        if how_match and how_match.group(3) in ["many", "much", "long",
                                                "old", "often"]:
            return "NUMBER"
        return "HOW"
    if re.search(r'\bwhen(\'s|\'re)?\s', qtext, re.I):
        return "WHEN"
    if re.search(r'\b(in|on|at|by|for|to|from|during|within)' +
                 r'\s+(what|which)\s+(year|month|day|date|time)\s',
                 qtext, re.I):
        return "WHEN"
    if re.search(r'\bto\swhom\s', qtext, re.I):
        return "WHO"
    return "OTHER"
def get_parse_tree_depth(root):
    """Get the parse tree depth.

    Computed recursively as one plus the deepest child subtree; a token
    with no children has depth one.

    :param root: The root of the parse tree.
    :type root: spacy.tokens.token.Token
    :return: The parse tree depth.
    :rtype: int
    """
    child_depths = [get_parse_tree_depth(child) for child in root.children]
    if not child_depths:
        return 1
    return 1 + max(child_depths)
def get_average_depth(doc):
    """Get the average parse tree depth.

    Averages the parse tree depth over all sentences in the document.

    :param doc: The document to process.
    :type doc: spacy.tokens.doc.Doc
    :return: The average parse tree depth.
    :rtype: float
    """
    depths = []
    for sent in doc.sents:
        # The sentence root is the token that is its own head.
        root = [token for token in sent if token.head == token][0]
        depths.append(get_parse_tree_depth(root))
    return sum(depths) / len(depths)
def get_max_depth(doc):
    """Get the maximum parse tree depth.

    Takes the maximum parse tree depth over all sentences in the
    document.

    :param doc: The document to process.
    :type doc: spacy.tokens.doc.Doc
    :return: The maximum parse tree depth.
    :rtype: int
    """
    # The root of each sentence is the token that is its own head.
    sentence_roots = (
        [token for token in sent if token.head == token][0]
        for sent in doc.sents
    )
    return max(get_parse_tree_depth(root) for root in sentence_roots)
def is_base_token(token):
    """Check if the token is a base token.

    A base token is any token that is neither a stop word nor
    punctuation.

    :param token: The token.
    :type token: spacy.tokens.token.Token
    :return: True if the token is a base token, False otherwise.
    :rtype: bool
    """
    if token.is_stop or token.is_punct:
        return False
    return True
def get_context_overlap(context, question):
    """Get the context overlap.

    Computes the fraction of lemmatized, non-stopword, non-punctuation
    question tokens that also appear in the context, rounded to three
    decimal places.

    :param context: The context.
    :type context: str
    :param question: The question.
    :type question: str
    :return: The context overlap.
    :rtype: float
    """
    global nlp
    if nlp is None:
        nlp = spacy.load("en_core_web_sm")
    question_doc = nlp(question)
    context_doc = nlp(context)
    # get tokens in base form
    question_lemmas = {token.lemma_ for token in question_doc
                       if is_base_token(token)}
    context_lemmas = {token.lemma_ for token in context_doc
                      if is_base_token(token)}
    shared = question_lemmas & context_lemmas
    # size of intersection token set / size of question token set
    overlap_ratio = len(shared) / len(question_lemmas)
    return round(overlap_ratio, 3)
import base64
import io
import json
import os
import pickle
import shutil
import warnings
from enum import Enum
from pathlib import Path
from typing import Any, Optional
import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
import torch
from ml_wrappers import wrap_model
from ml_wrappers.common.constants import Device
from torchmetrics.detection.mean_ap import MeanAveragePrecision
from erroranalysis._internal.cohort_filter import FilterDataWithCohortFilters
from raiutils.data_processing import convert_to_list
from raiutils.models.model_utils import SKLearn
from responsibleai._interfaces import Dataset, RAIInsightsData
from responsibleai._internal.constants import (ManagerNames, Metadata,
SerializationAttributes)
from responsibleai.exceptions import UserConfigValidationException
from responsibleai.feature_metadata import FeatureMetadata
from responsibleai.rai_insights.rai_base_insights import RAIBaseInsights
from responsibleai.serialization_utilities import serialize_json_safe
from responsibleai_vision.common.constants import (CommonTags,
ExplainabilityDefaults,
ImageColumns,
MLFlowSchemaLiterals,
ModelTask)
from responsibleai_vision.managers.error_analysis_manager import \
ErrorAnalysisManager
from responsibleai_vision.managers.explainer_manager import ExplainerManager
from responsibleai_vision.utils.feature_extractors import extract_features
from responsibleai_vision.utils.image_reader import (
get_base64_string_from_path, get_image_from_path, is_automl_image_model)
from responsibleai_vision.utils.image_utils import (
convert_images, get_images, transform_object_detection_labels)
# Aliases for the image data column names.
IMAGE = ImageColumns.IMAGE.value
IMAGE_URL = ImageColumns.IMAGE_URL.value

# Defaults for the explainability configuration.
DEFAULT_MAX_EVALS = ExplainabilityDefaults.DEFAULT_MAX_EVALS
DEFAULT_NUM_MASKS = ExplainabilityDefaults.DEFAULT_NUM_MASKS
DEFAULT_MASK_RES = ExplainabilityDefaults.DEFAULT_MASK_RES

# Key names used when serializing RAIVisionInsights state to disk.
_IMAGE_MODE = 'image_mode'
_IMAGE_DOWNLOADER = 'image_downloader'
_IMAGE_WIDTH = 'image_width'
_MAX_EVALS = 'max_evals'
_NUM_MASKS = 'num_masks'
_MASK_RES = 'mask_res'
_DEVICE = 'device'
_PREDICTIONS = 'predictions'
_TEST = 'test'
_TARGET_COLUMN = 'target_column'
_TASK_TYPE = 'task_type'
_CLASSES = 'classes'
_META_JSON = Metadata.META_JSON
_JSON_EXTENSION = '.json'
_PREDICT = 'predict'
_PREDICT_PROBA = 'predict_proba'
_EXT_TEST = '_ext_test'
_EXT_FEATURES = '_ext_features'
_MODEL = Metadata.MODEL
_MODEL_PKL = _MODEL + '.pkl'
_SERIALIZER = 'serializer'
_TRANSFORMATIONS = 'transformations'
_MLTABLE_DIR = 'mltables'
_MLTABLE_METADATA_FILENAME = 'metadata.json'
_TEST_MLTABLE_PATH = 'test_mltable_path'
_FEATURE_METADATA = Metadata.FEATURE_METADATA
_IDENTITY_FEATURE_NAME = 'identity_feature_name'
_DATETIME_FEATURES = 'datetime_features'
_TIME_SERIES_ID_FEATURES = 'time_series_id_features'
_CATEGORICAL_FEATURES = 'categorical_features'
_DROPPED_FEATURES = 'dropped_features'
def reshape_image(image):
    """Reshape image to have one extra dimension for rows.

    :param image: Image to reshape.
    :type image: numpy.ndarray
    :return: Reshaped image.
    :rtype: numpy.ndarray
    :raises ValueError: If the image is not 2- or 3-dimensional.
    """
    ndims = len(image.shape)
    if ndims not in (2, 3):
        raise ValueError('Image must have 2 or 3 dimensions')
    return np.expand_dims(image, axis=0)
class RAIVisionInsights(RAIBaseInsights):
"""Defines the top-level RAIVisionInsights API.
Use RAIVisionInsights to assess vision machine learning models in a
single API.
"""
    def __init__(self, model: Any,
                 test: pd.DataFrame,
                 target_column: str, task_type: str,
                 classes: Optional[np.ndarray] = None,
                 serializer: Optional[Any] = None,
                 maximum_rows_for_test: int = 5000,
                 image_mode: str = "RGB",
                 test_data_path: Optional[str] = None,
                 transformations: Optional[Any] = None,
                 image_downloader: Optional[Any] = None,
                 feature_metadata: Optional[FeatureMetadata] = None,
                 image_width: Optional[float] = None,
                 max_evals: Optional[int] = DEFAULT_MAX_EVALS,
                 num_masks: Optional[int] = DEFAULT_NUM_MASKS,
                 mask_res: Optional[int] = DEFAULT_MASK_RES,
                 device: Optional[str] = Device.AUTO.value):
        """Creates an RAIVisionInsights object.

        :param model: The model to compute RAI insights for.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param test: The test dataframe including the label column.
        :type test: pd.DataFrame
        :param target_column: The name of the label column or list of columns.
            This is a list of columns for multilabel models.
        :type target_column: str or list[str]
        :param task_type: The task to run.
        :type task_type: str
        :param classes: The class labels in the dataset.
        :type classes: numpy.ndarray
        :param serializer: Picklable custom serializer with save and load
            methods for custom model serialization.
            The save method writes the model to file given a parent directory.
            The load method returns the deserialized model from the same
            parent directory.
        :type serializer: object
        :param maximum_rows_for_test: Limit on size of test data
            (for performance reasons)
        :type maximum_rows_for_test: int
        :param image_mode: The mode to open the image in.
            See pillow documentation for all modes:
            https://pillow.readthedocs.io/en/stable/handbook/concepts.html
        :type image_mode: str
        :param test_data_path: The path to the test data.
        :type test_data_path: str
        :param transformations: The transformations to apply to the image.
            This must be a callable or a string column name with
            transformed images.
        :type transformations: object
        :param image_downloader: The image downloader to use to download
            images from a URL.
        :type image_downloader: object
        :param feature_metadata: Feature metadata for the dataset
            to identify different kinds of features.
        :type feature_metadata: Optional[FeatureMetadata]
        :param image_width: The width to resize the image to.
            The size is in inches. Note larger resolutions in
            dashboard can cause slowness and memory errors.
            If not specified does not resize images.
        :type image_width: float
        :param max_evals: The maximum number of evaluations to run.
            Used by shap hierarchical image explainer.
            If not specified defaults to 100.
        :type max_evals: int
        :param num_masks: The number of masks to use for the
            DRISE image explainer for object detection.
            If not specified defaults to 50.
        :type num_masks: int
        :param mask_res: The resolution of the masks to use for the
            DRISE image explainer for object detection.
            If not specified defaults to 4.
        :type mask_res: int
        :param device: The device to run the model on.
            If not specified defaults to Device.AUTO.
        :type device: str
        """
        # drop index as this can cause issues later like when copying
        # target column below from test dataset to _ext_test_df
        test = test.reset_index(drop=True)
        if feature_metadata is None:
            # initialize to avoid having to keep checking if it is None
            feature_metadata = FeatureMetadata()
        self._feature_metadata = feature_metadata
        self.image_mode = image_mode
        self.image_width = image_width
        # Explicit None falls back to defaults; non-positive values are
        # rejected up-front before any expensive work.
        if max_evals is None:
            max_evals = DEFAULT_MAX_EVALS
        elif max_evals < 1:
            raise ValueError('max_evals must be greater than 0')
        if num_masks is None:
            num_masks = DEFAULT_NUM_MASKS
        elif num_masks < 1:
            raise ValueError('num_masks must be greater than 0')
        if mask_res is None:
            mask_res = DEFAULT_MASK_RES
        elif mask_res < 1:
            raise ValueError('mask_res must be greater than 0')
        if device is None:
            device = Device.AUTO.value
        self.max_evals = max_evals
        self.num_masks = num_masks
        self.mask_res = mask_res
        self.device = device
        self.test_mltable_path = test_data_path
        self._transformations = transformations
        self._image_downloader = image_downloader
        # Wrap the model using a small two-row sample so the wrapper can
        # probe it with example data.
        sample = test.iloc[0:2]
        sample = get_images(sample, self.image_mode, self._transformations)
        self._wrapped_model = wrap_model(
            model, sample, task_type, classes=classes, device=device)
        # adding this field to use in _get_single_image and _save_predictions
        self._task_type = task_type
        self.automl_image_model = is_automl_image_model(self._wrapped_model)
        self._validate_rai_insights_input_parameters(
            model=self._wrapped_model, test=test,
            target_column=target_column, task_type=task_type,
            classes=classes,
            serializer=serializer,
            maximum_rows_for_test=maximum_rows_for_test)
        self._classes = RAIVisionInsights._get_classes(
            task_type=task_type,
            test=test,
            target_column=target_column,
            classes=classes
        )
        # Lazily-populated cache of model predictions on the test set.
        self.predict_output = None
        if task_type == ModelTask.OBJECT_DETECTION:
            test = transform_object_detection_labels(
                test, target_column, self._classes)
        super(RAIVisionInsights, self).__init__(
            model, None, test, target_column, task_type,
            serializer)
        # Extract tabular features from the images for error analysis.
        ext_test, ext_features = extract_features(
            self.test, self.target_column, self.task_type,
            self.image_mode,
            self._feature_metadata.dropped_features)
        self._ext_test = ext_test
        self._ext_features = ext_features
        self._ext_test_df = pd.DataFrame(ext_test, columns=ext_features)
        self._ext_test_df[target_column] = test[target_column]
        self._initialize_managers()
def _initialize_managers(self):
"""Initializes the managers.
Initializes the explainer manager.
"""
self._explainer_manager = ExplainerManager(
self._wrapped_model, self.test,
self.target_column,
self.task_type,
self._classes,
self.image_mode,
self.max_evals,
self.num_masks,
self.mask_res)
self._error_analysis_manager = ErrorAnalysisManager(
self._wrapped_model, self.test, self._ext_test_df,
self.target_column,
self.task_type,
self.image_mode,
self._transformations,
self._classes,
self._feature_metadata.categorical_features)
self._managers = [self._explainer_manager,
self._error_analysis_manager]
def compute(self, **kwargs):
"""Calls compute on each of the managers."""
for manager in self._managers:
manager.compute(**kwargs)
@staticmethod
def _get_classes(task_type, test, target_column, classes):
if task_type == ModelTask.IMAGE_CLASSIFICATION:
if classes is None:
classes = test[target_column].unique()
# sort the classes after calling unique in numeric case
classes.sort()
return classes
else:
return classes
elif task_type == ModelTask.MULTILABEL_IMAGE_CLASSIFICATION:
if classes is None:
return target_column
else:
return classes
elif task_type == ModelTask.OBJECT_DETECTION:
return classes
else:
return classes
    def _validate_rai_insights_input_parameters(
            self, model: Any, test: pd.DataFrame,
            target_column: str, task_type: str,
            classes: np.ndarray,
            serializer,
            maximum_rows_for_test: int):
        """Validate the inputs for the RAIVisionInsights constructor.

        :param model: The model to compute RAI insights for.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param test: The test dataset including the label column.
        :type test: pandas.DataFrame
        :param target_column: The name of the label column.
        :type target_column: str
        :param task_type: The task to run, can be `classification` or
            `regression`.
        :type task_type: str
        :param classes: The class labels in the dataset.
        :type classes: numpy.ndarray
        :param serializer: Picklable custom serializer with save and load
            methods defined for model that is not serializable. The save
            method returns a dictionary state and load method returns the
            model.
        :type serializer: object
        :param maximum_rows_for_test: Limit on size of test data
            (for performance reasons)
        :type maximum_rows_for_test: int
        :raises UserConfigValidationException: If any validation fails.
        """
        valid_tasks = [
            ModelTask.IMAGE_CLASSIFICATION.value,
            ModelTask.MULTILABEL_IMAGE_CLASSIFICATION.value,
            ModelTask.OBJECT_DETECTION.value
        ]
        if task_type not in valid_tasks:
            message = (f"Unsupported task type '{task_type}'. "
                       f"Should be one of {valid_tasks}")
            raise UserConfigValidationException(message)
        if model is None:
            # Not fatal: insights can still be computed, but explanations
            # require a model, so warn loudly.
            warnings.warn(
                'INVALID-MODEL-WARNING: No valid model is supplied. '
                'Explanations will not work')
        if serializer is not None:
            # The serializer must expose save/load and itself be picklable
            # so the insights object can be persisted.
            if not hasattr(serializer, 'save'):
                raise UserConfigValidationException(
                    'The serializer does not implement save()')
            if not hasattr(serializer, 'load'):
                raise UserConfigValidationException(
                    'The serializer does not implement load()')
            try:
                pickle.dumps(serializer)
            except Exception:
                raise UserConfigValidationException(
                    'The serializer should be serializable via pickle')
        test_is_pd = isinstance(test, pd.DataFrame)
        if not test_is_pd:
            raise UserConfigValidationException(
                "Unsupported data type for test dataset. "
                "Expecting pandas DataFrame."
            )
        if test.shape[0] > maximum_rows_for_test:
            msg_fmt = 'The test data has {0} rows, ' +\
                'but limit is set to {1} rows. ' +\
                'Please resample the test data or ' +\
                'adjust maximum_rows_for_test'
            raise UserConfigValidationException(
                msg_fmt.format(
                    test.shape[0], maximum_rows_for_test)
            )
        if task_type == ModelTask.MULTILABEL_IMAGE_CLASSIFICATION.value:
            if not isinstance(target_column, list):
                raise UserConfigValidationException(
                    'The target_column should be a list for multilabel '
                    'classification')
            # check all target columns are present in test dataset
            target_columns_set = set(target_column)
            if not target_columns_set.issubset(set(test.columns)):
                raise UserConfigValidationException(
                    'The list of target_column(s) should be in test data')
        else:
            if target_column not in list(test.columns):
                raise UserConfigValidationException(
                    'Target name {0} not present in test data'.format(
                        target_column)
                )
        if model is not None:
            # Pick one row from test data
            test_img = self._get_single_image(test, target_column)
            # Call the model
            # Smoke-test predict() on a single image so misconfigured
            # models fail fast during construction.
            try:
                model.predict(test_img)
            except Exception:
                raise UserConfigValidationException(
                    'The model passed cannot be used for'
                    ' getting predictions via predict()'
                )
    def _get_single_image(self, dataset, target_column):
        """Get a single image from the test data.

        Used for calling predict on the dataset.

        :param dataset: The dataset to get the image from.
        :type dataset: pandas.DataFrame
        :param target_column: The name of the label column.
        :type target_column: str
        :return: A single image from the test data
        :rtype: numpy.ndarray
        """
        # Pick one row from dataset
        if not isinstance(target_column, list):
            target_column = [target_column]
        img = dataset.drop(
            target_column, axis=1).iloc[0][0]
        if isinstance(img, str):
            if self.automl_image_model:
                # AutoML MLFlow models take base64-encoded image data
                # (plus the original image size for object detection).
                if self._task_type == ModelTask.OBJECT_DETECTION:
                    img_data, img_size = get_base64_string_from_path(
                        img, return_image_size=True)
                    img = pd.DataFrame(
                        data=[[img_data, img_size]],
                        columns=[
                            MLFlowSchemaLiterals.INPUT_COLUMN_IMAGE,
                            MLFlowSchemaLiterals.INPUT_IMAGE_SIZE],
                    )
                else:
                    img = pd.DataFrame(
                        data=[get_base64_string_from_path(img)],
                        columns=[MLFlowSchemaLiterals.INPUT_COLUMN_IMAGE],
                    )
                return img
            else:
                img = get_image_from_path(img, self.image_mode)
        # apply a transformation if the image is an RGBA image
        # NOTE(review): img[0][0].size == 4 inspects the first pixel's
        # channel count, assuming img is a numpy array shaped
        # (rows, cols, channels) — confirm for non-path inputs.
        if img[0][0].size == 4:
            row, col, ch = img.shape
            if ch == 4:
                # Alpha-blend the RGBA image onto a white background to
                # produce a plain 3-channel RGB image.
                rgb = np.zeros((row, col, 3), dtype='float32')
                r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
                a = np.asarray(img[:, :, 3], dtype='float32') / 255.0
                rgb[:, :, 0] = r * a + (1.0 - a) * 255.0
                rgb[:, :, 1] = g * a + (1.0 - a) * 255.0
                rgb[:, :, 2] = b * a + (1.0 - a) * 255.0
                img = rgb
        return reshape_image(img)
def get_filtered_test_data(self, filters, composite_filters,
include_original_columns_only=False,
use_entire_test_data=False):
"""Get the filtered test data based on cohort filters.
:param filters: The filters to apply.
:type filters: list[Filter]
:param composite_filters: The composite filters to apply.
:type composite_filters: list[CompositeFilter]
:param include_original_columns_only: Whether to return the original
data columns.
:type include_original_columns_only: bool
:param use_entire_test_data: Whether to use entire test set for
filtering the data based on cohort.
:type use_entire_test_data: bool
:return: The filtered test data.
:rtype: pandas.DataFrame
"""
model_analyzer = self._error_analysis_manager._analyzer
dataset = model_analyzer.dataset
model = model_analyzer.model
if self.predict_output is None:
# Cache predictions of the model
self.predict_output = model_analyzer.model.predict(dataset)
pred_y = self.predict_output
true_y = model_analyzer.true_y
categorical_features = model_analyzer.categorical_features
categories = model_analyzer.categories
classes = model_analyzer.classes
model_task = model_analyzer.model_task
filter_data_with_cohort = FilterDataWithCohortFilters(
model=model,
dataset=dataset,
features=dataset.columns,
categorical_features=categorical_features,
categories=categories,
true_y=true_y,
pred_y=pred_y,
model_task=model_task,
classes=classes)
return filter_data_with_cohort.filter_data_from_cohort(
filters=filters,
composite_filters=composite_filters,
include_original_columns_only=include_original_columns_only)
    @property
    def error_analysis(self) -> ErrorAnalysisManager:
        """Get the error analysis manager.

        The manager is created in _initialize_managers.

        :return: The error analysis manager.
        :rtype: ErrorAnalysisManager
        """
        return self._error_analysis_manager
    @property
    def explainer(self) -> ExplainerManager:
        """Get the explainer manager.

        The manager is created in _initialize_managers.

        :return: The explainer manager.
        :rtype: ExplainerManager
        """
        return self._explainer_manager
def get_data(self):
"""Get all data as RAIInsightsData object
:return: Model Analysis Data
:rtype: RAIInsightsData
"""
data = RAIInsightsData()
dataset = self._get_dataset()
data.dataset = dataset
data.errorAnalysisData = self.error_analysis.get_data()
return data
    def _get_dataset(self):
        """Build the Dataset payload consumed by the dashboard frontend.

        Gathers predictions, true labels, extracted features and
        base64-encoded thumbnails of the test images into a single
        Dataset object.

        :return: The dashboard dataset.
        :rtype: Dataset
        """
        dashboard_dataset = Dataset()
        tasktype = self.task_type
        classification_tasks = [ModelTask.IMAGE_CLASSIFICATION,
                                ModelTask.MULTILABEL_IMAGE_CLASSIFICATION,
                                ModelTask.OBJECT_DETECTION]
        is_classification_task = self.task_type in classification_tasks
        if isinstance(self.task_type, Enum):
            tasktype = self.task_type.value
        # NOTE(review): later comparisons mix
        # `tasktype == ModelTask.OBJECT_DETECTION` (enum) and
        # `== ModelTask.OBJECT_DETECTION.value` (string); when task_type
        # is an Enum, tasktype is its string value, so the enum
        # comparisons may never match — TODO confirm intended behavior.
        dashboard_dataset.task_type = tasktype
        categorical_features = self._feature_metadata.categorical_features
        if categorical_features is None:
            categorical_features = []
        dashboard_dataset.categorical_features = categorical_features
        dashboard_dataset.class_names = convert_to_list(
            self._classes)
        if is_classification_task:
            if self.automl_image_model:
                # AutoML MLFlow models take base64-encoded image data
                # rather than raw image arrays.
                dataset = np.array(self.test.drop(
                    [self.target_column], axis=1).iloc[:, 0].tolist())
                if tasktype == ModelTask.OBJECT_DETECTION.value:
                    dataset = pd.DataFrame(
                        data=[[x for x in get_base64_string_from_path(
                            img_path, return_image_size=True)] for
                            img_path in dataset],
                        columns=[
                            MLFlowSchemaLiterals.INPUT_COLUMN_IMAGE,
                            MLFlowSchemaLiterals.INPUT_IMAGE_SIZE],
                    )
                else:
                    dataset = pd.DataFrame(
                        data=[
                            get_base64_string_from_path(img_path)
                            for img_path in dataset
                        ],
                        columns=[MLFlowSchemaLiterals.INPUT_COLUMN_IMAGE],
                    )
            else:
                dataset = get_images(self.test, self.image_mode,
                                     self._transformations)
        else:
            raise ValueError('Unknown task type: {}'.format(self.task_type))
        predicted_y = None
        if dataset is not None and self._wrapped_model is not None:
            try:
                predicted_y = self._wrapped_model.predict(dataset)
            except Exception as ex:
                msg = ('Model does not support predict method for given '
                       'dataset type')
                raise ValueError(msg) from ex
            try:
                predicted_y = convert_to_list(predicted_y)
            except Exception as ex:
                raise ValueError(
                    'Model prediction output of unsupported type,') from ex
        if predicted_y is not None:
            if is_classification_task:
                predicted_y = self._convert_labels(
                    predicted_y, dashboard_dataset.class_names)
            dashboard_dataset.predicted_y = predicted_y
            if tasktype == ModelTask.OBJECT_DETECTION:
                dashboard_dataset.object_detection_predicted_y = predicted_y
        row_length = len(dataset)
        dashboard_dataset.features = self._ext_test
        true_y = self.test[self.target_column]
        # Only attach true labels when they line up with the dataset rows.
        if true_y is not None and len(true_y) == row_length:
            true_y = convert_to_list(true_y)
            if is_classification_task:
                true_y = self._convert_labels(
                    true_y, dashboard_dataset.class_names)
            dashboard_dataset.true_y = true_y
            if tasktype == ModelTask.OBJECT_DETECTION:
                dashboard_dataset.object_detection_true_y = true_y
        dashboard_dataset.feature_names = self._ext_features
        dashboard_dataset.target_column = self.target_column
        column_names = list(self.test.columns)
        if IMAGE in column_names:
            images = self.test[:].image
        elif IMAGE_URL in column_names:
            images = self.test[:].image_url
        else:
            raise ValueError('No image column found in test data')
        # Render each image through matplotlib to produce a base64 JPEG
        # thumbnail for the dashboard.
        encoded_images = []
        image_dimensions = []
        for _, image in enumerate(images):
            if isinstance(image, str):
                image = get_image_from_path(image, self.image_mode)
            s = io.BytesIO()
            # IMshow only accepts floats in range [0, 1]
            try:
                image /= 255
            except Exception:
                # In-place divide can fail for certain types
                image = image / 255
            axes = pl.gca()
            axes.get_xaxis().set_visible(False)
            axes.get_yaxis().set_visible(False)
            pl.imshow(image)
            # resize image as optimization
            size = pl.gcf().get_size_inches()
            curr_width = size[0]
            curr_height = size[1]
            image_dimensions.append([image.shape[1], image.shape[0]])
            new_width = self.image_width
            if new_width is not None:
                # Scale height by the same factor to keep aspect ratio.
                factor = new_width / curr_width
                pl.gcf().set_size_inches((new_width, curr_height * factor))
            pl.savefig(s, format='jpg', bbox_inches='tight', pad_inches=0.)
            pl.clf()
            s.seek(0)
            b64_encoded = base64.b64encode(s.read())
            b64 = b64_encoded.decode(CommonTags.IMAGE_DECODE_UTF_FORMAT)
            encoded_images.append(b64)
        # passing to frontend to draw bounding boxes with the correct scale
        dashboard_dataset.imageDimensions = image_dimensions
        if len(encoded_images) > 0:
            dashboard_dataset.images = encoded_images
        if tasktype == ModelTask.OBJECT_DETECTION:
            # Keep the raw detections and expose per-class counts as the
            # main predicted_y/true_y, matching the UI's multilabel format.
            d = dashboard_dataset
            dashboard_dataset.object_detection_predicted_y = d.predicted_y
            dashboard_dataset.object_detection_true_y = d.true_y
            dashboard_dataset.predicted_y = self._format_od_labels(
                dashboard_dataset.predicted_y,
                class_names=dashboard_dataset.class_names
            )
            dashboard_dataset.true_y = self._format_od_labels(
                dashboard_dataset.true_y,
                class_names=dashboard_dataset.class_names
            )
        return dashboard_dataset
def _format_od_labels(self, y, class_names):
"""Formats the Object Detection label representation to
multi-label image classification to follow the UI format
provided in fridgeMultilabel.ts.
:param y: Target array
:type y: list
:param class_names: The class labels in the dataset.
:type class_names: list
:return: Formatted list of targets
:rtype: list
"""
formatted_labels = []
for image in y:
object_labels_lst = [0] * len(class_names)
for detection in image:
# tracking number of same objects in the image
object_labels_lst[int(detection[0] - 1)] += 1
formatted_labels.append(object_labels_lst)
return formatted_labels
def _convert_images(self, dataset):
    """Convert the images to the format required by the model.

    Base64-encoded images are decoded into numpy arrays; images that
    are already numpy arrays are returned as is.

    :param dataset: The dataset to convert.
    :type dataset: numpy.ndarray
    :return: The converted dataset.
    :rtype: numpy.ndarray
    """
    # delegate to the shared helper using this insight's image mode
    return convert_images(dataset, self.image_mode)
def _convert_images_base64_df(self, dataset: pd.DataFrame) -> pd.DataFrame:
    """Converts the images to the format required by the model.
    If the images are base64 encoded, they are decoded and converted to
    numpy arrays. If the images are already numpy arrays, they are
    returned as is.

    :param dataset: The dataset to convert.
    :type dataset: pandas.DataFrame
    :return: The base64 converted dataset.
    :rtype: pandas.DataFrame
    """
    # NOTE(review): dataset[0] selects the column labelled 0 on a
    # DataFrame (not the first row/cell); this check appears to assume
    # a positional lookup of the first image entry — confirm callers.
    if len(dataset) > 0 and isinstance(dataset[0], str):
        # rewrite the image column in place with base64-encoded strings
        dataset.loc[:, ImageColumns.IMAGE.value] = dataset.loc[
            :, ImageColumns.IMAGE.value
        ].map(lambda x: get_base64_string_from_path(x))
    return dataset
def save(self, path):
    """Save the RAIVisionInsights to the given path.
    In addition to the usual data, saves the extracted features.

    :param path: The directory path to save the RAIInsights to.
    :type path: str
    """
    # persist the base insights state (managers, data, predictions)
    super(RAIVisionInsights, self).save(path)
    # Save extracted features data
    self._save_ext_data(path)
    # pickle the optional image transformations and downloader
    self._save_transformations(path)
    self._save_image_downloader(path)
def _save_ext_data(self, path):
    """Save the copy of raw data and their related metadata.

    :param path: The directory path to save the RAIBaseInsights to.
    :type path: str
    """
    data_directory = Path(path) / SerializationAttributes.DATA_DIRECTORY
    ext_path = data_directory / (_EXT_TEST + _JSON_EXTENSION)
    ext_features_path = data_directory / (_EXT_FEATURES + _JSON_EXTENSION)
    # extracted tabular features and their column names as JSON
    self._save_list_data(ext_path, self._ext_test)
    self._save_list_data(ext_features_path, self._ext_features)
    if self._image_downloader:
        # copy the mltable directory alongside the serialized insights
        # so the dataset can be re-downloaded/re-read on load
        mltable_directory = data_directory / _MLTABLE_DIR
        os.makedirs(mltable_directory, exist_ok=True)
        mltable_data_dict = {}
        if self.test_mltable_path:
            # only the final path segment is stored; the copy below
            # makes the saved layout self-contained
            mltable_dir = self.test_mltable_path.split('/')[-1]
            mltable_data_dict[_TEST_MLTABLE_PATH] = mltable_dir
            test_dir = mltable_directory / mltable_dir
            shutil.copytree(
                Path(self.test_mltable_path), test_dir
            )
        if mltable_data_dict:
            dict_path = mltable_directory / _MLTABLE_METADATA_FILENAME
            with open(dict_path, 'w') as file:
                json.dump(
                    mltable_data_dict, file, default=serialize_json_safe)
def _save_transformations(self, path):
    """Pickle the image transformations, if any, to the given path.

    :param path: The directory path to save the transformations to.
    :type path: str
    """
    if self._transformations is None:
        # nothing to persist
        return
    target_file = Path(path) / _TRANSFORMATIONS
    with open(target_file, 'wb') as handle:
        pickle.dump(self._transformations, handle)
def _save_image_downloader(self, path):
    """Pickle the image downloader, if any, to the given path.

    :param path: The directory path to save the image downloader to.
    :type path: str
    """
    if self._image_downloader is None:
        # nothing to persist
        return
    target_file = Path(path) / _IMAGE_DOWNLOADER
    with open(target_file, 'wb') as handle:
        pickle.dump(self._image_downloader, handle)
def _save_list_data(self, data_path, data):
    """Serialize the given list to the given path as JSON.

    :param data_path: The path to save the data to.
    :type data_path: str
    :param data: The data to save.
    :type data: list
    """
    with open(data_path, 'w') as fp:
        # serialize_json_safe handles values plain json cannot encode
        json.dump(data, fp, default=serialize_json_safe)
def _convert_labels(self, labels, class_names, unique_labels=None):
    """Convert labels to indexes if possible.

    :param labels: Labels to convert.
    :type labels: list or numpy.ndarray
    :param class_names: List of class names.
    :type class_names: list
    :param unique_labels: List of unique labels.
    :type unique_labels: list
    :return: Converted labels.
    :rtype: list
    """
    # object detection labels are structured lists; pass them through
    if self.task_type == ModelTask.OBJECT_DETECTION:
        return labels
    unique_labels = unique_labels or np.unique(labels).tolist()
    # multilabel case: recurse per row, reusing the same unique set
    # NOTE(review): assumes labels is non-empty here (labels[0]) —
    # confirm callers never pass an empty list
    if isinstance(labels[0], list):
        return [self._convert_labels(
            li, class_names, unique_labels) for li in labels]
    is_boolean = all(isinstance(y, (bool)) for y in unique_labels)
    if is_boolean:
        # normalize booleans to 0.0/1.0 floats
        labels_arr = np.array(labels)
        labels = labels_arr.astype(float).tolist()
    if class_names is not None:
        num_types = (int, float)
        is_numeric = all(isinstance(y, num_types) for y in unique_labels)
        # string labels are mapped to their index in class_names
        if not is_numeric:
            labels = [class_names.index(y) for y in labels]
    return labels
def _save_predictions(self, path):
    """Save the predict() and predict_proba() output.

    :param path: The directory path to save the RAIVisionInsights to.
    :type path: str
    """
    prediction_output_path = Path(path) / _PREDICTIONS
    prediction_output_path.mkdir(parents=True, exist_ok=True)
    # nothing to predict with; the directory is still created above
    if self.model is None:
        return
    if self.automl_image_model:
        # AutoML MLflow models consume base64-encoded images in a
        # DataFrame rather than decoded arrays
        test = np.array(
            self.test.drop([self.target_column], axis=1)
            .iloc[:, 0]
            .tolist()
        )
        # NOTE(review): compares self._task_type while the rest of the
        # class uses self.task_type — confirm both are kept in sync
        if self._task_type == ModelTask.OBJECT_DETECTION.value:
            # object detection additionally needs the image size column
            test = pd.DataFrame(
                data=[[x for x in get_base64_string_from_path(
                    img_path, return_image_size=True)] for
                    img_path in test],
                columns=[
                    MLFlowSchemaLiterals.INPUT_COLUMN_IMAGE,
                    MLFlowSchemaLiterals.INPUT_IMAGE_SIZE],
            )
        else:
            test = pd.DataFrame(
                data=[
                    get_base64_string_from_path(img_path) for img_path in
                    test
                ],
                columns=[MLFlowSchemaLiterals.INPUT_COLUMN_IMAGE],
            )
    else:
        # non-AutoML models receive decoded (and transformed) images
        test = get_images(
            self.test, self.image_mode, self._transformations
        )
    predict_output = self._wrapped_model.predict(test)
    # numpy output is converted so json.dumps below can serialize it
    if type(predict_output) != list:
        predict_output = predict_output.tolist()
    self._write_to_file(
        prediction_output_path / (_PREDICT + _JSON_EXTENSION),
        json.dumps(predict_output))
    if hasattr(self.model, SKLearn.PREDICT_PROBA):
        predict_proba_output = self.model.predict_proba(test)
        if type(predict_proba_output) != list:
            predict_proba_output = predict_proba_output.tolist()
        self._write_to_file(
            prediction_output_path / (_PREDICT_PROBA + _JSON_EXTENSION),
            json.dumps(predict_proba_output))
def _save_metadata(self, path):
    """Save the metadata like target column, categorical features,
    task type and the classes (if any).

    :param path: The directory path to save the RAIVisionInsights to.
    :type path: str
    """
    top_dir = Path(path)
    # classes may be a numpy array; convert for json serialization
    classes = convert_to_list(self._classes)
    feature_metadata_dict = self._feature_metadata.to_dict()
    # keys are the module-level metadata constants so _load_metadata
    # can read them back symmetrically
    meta = {
        _TARGET_COLUMN: self.target_column,
        _TASK_TYPE: self.task_type,
        _CLASSES: classes,
        _IMAGE_MODE: self.image_mode,
        _FEATURE_METADATA: feature_metadata_dict,
        _IMAGE_WIDTH: self.image_width,
        _MAX_EVALS: self.max_evals,
        _NUM_MASKS: self.num_masks,
        _MASK_RES: self.mask_res,
        _DEVICE: self.device
    }
    with open(top_dir / _META_JSON, 'w') as file:
        json.dump(meta, file)
@staticmethod
def _load_metadata(inst, path):
    """Load the metadata.

    :param inst: RAIVisionInsights object instance.
    :type inst: RAIVisionInsights
    :param path: The directory path to metadata location.
    :type path: str
    """
    top_dir = Path(path)
    with open(top_dir / _META_JSON, 'r') as meta_file:
        meta = meta_file.read()
    meta = json.loads(meta)
    # attributes are written through __dict__ because inst was built
    # with __new__ and has not run __init__
    inst.__dict__[_TARGET_COLUMN] = meta[_TARGET_COLUMN]
    inst.__dict__[_TASK_TYPE] = meta[_TASK_TYPE]
    inst.__dict__[_IMAGE_MODE] = meta[_IMAGE_MODE]
    # image width was added later; tolerate older serialized insights
    if _IMAGE_WIDTH in meta:
        inst.__dict__[_IMAGE_WIDTH] = meta[_IMAGE_WIDTH]
    else:
        inst.__dict__[_IMAGE_WIDTH] = None
    # optional explainability parameters fall back to their defaults
    params = [_MAX_EVALS, _NUM_MASKS, _MASK_RES, _DEVICE]
    defaults = [DEFAULT_MAX_EVALS, DEFAULT_NUM_MASKS,
                DEFAULT_MASK_RES, Device.AUTO.value]
    for param, default in zip(params, defaults):
        if param in meta:
            inst.__dict__[param] = meta[param]
        else:
            inst.__dict__[param] = default
    classes = meta[_CLASSES]
    inst.__dict__['_' + _CLASSES] = RAIVisionInsights._get_classes(
        task_type=meta[_TASK_TYPE],
        test=inst.__dict__[_TEST],
        target_column=meta[_TARGET_COLUMN],
        classes=classes
    )
    # feature metadata may be absent or null in older saves
    if (Metadata.FEATURE_METADATA not in meta or
            meta[Metadata.FEATURE_METADATA] is None):
        inst.__dict__['_' + Metadata.FEATURE_METADATA] = FeatureMetadata()
    else:
        inst.__dict__['_' + Metadata.FEATURE_METADATA] = FeatureMetadata(
            identity_feature_name=meta[Metadata.FEATURE_METADATA][
                _IDENTITY_FEATURE_NAME],
            datetime_features=meta[Metadata.FEATURE_METADATA][
                _DATETIME_FEATURES],
            time_series_id_features=meta[Metadata.FEATURE_METADATA][
                _TIME_SERIES_ID_FEATURES],
            categorical_features=meta[Metadata.FEATURE_METADATA][
                _CATEGORICAL_FEATURES],
            dropped_features=meta[Metadata.FEATURE_METADATA][
                _DROPPED_FEATURES])
    # load the image downloader as part of metadata
    RAIVisionInsights._load_image_downloader(inst, path)
    # load the transformations as part of metadata
    RAIVisionInsights._load_transformations(inst, path)
    # load the extracted features as part of metadata
    # (must run after the image downloader is restored above)
    RAIVisionInsights._load_ext_data(inst, path)
@staticmethod
def _load_ext_data(inst, path):
    """Load the extracted features data.

    :param inst: RAIVisionInsights object instance.
    :type inst: RAIVisionInsights
    :param path: The directory path to extracted data location.
    :type path: str
    """
    top_dir = Path(path)
    data_path = top_dir / SerializationAttributes.DATA_DIRECTORY
    json_test_path = data_path / (_EXT_TEST + _JSON_EXTENSION)
    with open(json_test_path, 'r') as file:
        inst._ext_test = json.loads(file.read())
    json_features_path = data_path / (_EXT_FEATURES + _JSON_EXTENSION)
    with open(json_features_path, 'r') as file:
        inst._ext_features = json.loads(file.read())
    # rebuild the extracted-features frame and re-attach the label(s)
    inst._ext_test_df = pd.DataFrame(
        inst._ext_test, columns=inst._ext_features)
    target_column = inst.target_column
    test = inst.test
    inst._ext_test_df[target_column] = test[target_column]
    inst.test_mltable_path = None
    mltable_directory = data_path / _MLTABLE_DIR
    # the mltable directory only exists when an image downloader was
    # saved; the short-circuit below relies on that
    if inst._image_downloader and len(os.listdir(mltable_directory)) > 0:
        mltable_dict_path = mltable_directory / _MLTABLE_METADATA_FILENAME
        mltable_dict = {}
        with open(mltable_dict_path, 'r') as file:
            mltable_dict = json.load(file)
        if mltable_dict.get(_TEST_MLTABLE_PATH, ''):
            inst.test_mltable_path = str(mltable_directory / mltable_dict[
                _TEST_MLTABLE_PATH])
            # re-download/re-read the dataset from the copied mltable
            test_dataset = inst._image_downloader(inst.test_mltable_path)
            inst.test = test_dataset._images_df
@staticmethod
def _load_transformations(inst, path):
    """Load the pickled transformations, if present, into the instance.

    :param inst: RAIVisionInsights object instance.
    :type inst: RAIVisionInsights
    :param path: The directory path to transformations location.
    :type path: str
    """
    transformations_file = Path(path) / _TRANSFORMATIONS
    if not transformations_file.exists():
        # older saves (or insights without transformations) have no file
        inst._transformations = None
        return
    with open(transformations_file, 'rb') as handle:
        inst._transformations = pickle.load(handle)
@staticmethod
def _load_image_downloader(inst, path):
    """Load the pickled image downloader, if present, into the instance.

    :param inst: RAIVisionInsights object instance.
    :type inst: RAIVisionInsights
    :param path: The directory path to image downloader location.
    :type path: str
    """
    image_downloader_file = Path(path) / _IMAGE_DOWNLOADER
    if not image_downloader_file.exists():
        # insights saved without a downloader leave no file behind
        inst._image_downloader = None
        return
    with open(image_downloader_file, 'rb') as handle:
        inst._image_downloader = pickle.load(handle)
@staticmethod
def load(path):
    """Load the RAIVisionInsights from the given path.

    :param path: The directory path to load the RAIVisionInsights from.
    :type path: str
    :return: The RAIVisionInsights object after loading.
    :rtype: RAIVisionInsights
    """
    # create the RAIVisionInsights without any properties using the __new__
    # function, similar to pickle
    inst = RAIVisionInsights.__new__(RAIVisionInsights)
    manager_map = {
        ManagerNames.EXPLAINER: ExplainerManager,
        ManagerNames.ERROR_ANALYSIS: ErrorAnalysisManager,
    }
    # load current state
    RAIBaseInsights._load(
        path, inst, manager_map, RAIVisionInsights._load_metadata)
    # rebuild the runtime-only wrapper state that is never serialized
    inst._wrapped_model = wrap_model(inst.model, inst.test, inst.task_type,
                                     classes=inst._classes,
                                     device=inst.device)
    inst.automl_image_model = is_automl_image_model(inst._wrapped_model)
    inst.predict_output = None
    return inst
def compute_object_detection_metrics(
        self,
        selection_indexes,
        aggregate_method,
        class_name,
        iou_threshold,
        object_detection_cache):
    """Compute mean average precision (mAP) plus per-class average
    precision (AP) and average recall (AR) for each cohort.

    :param selection_indexes: Dataset indices, one list per cohort.
    :type selection_indexes: list[list[int]]
    :param aggregate_method: Aggregation method forwarded to
        torchmetrics MeanAveragePrecision (e.g. 'macro').
    :type aggregate_method: str
    :param class_name: The class to report metrics for.
    :type class_name: str
    :param iou_threshold: IoU threshold as a percentage in [0, 100].
    :type iou_threshold: int
    :param object_detection_cache: Previously computed cohort metrics,
        keyed by cohort indices and parameters.
        NOTE(review): the cache is only read here, never written —
        confirm the caller is responsible for populating it.
    :type object_detection_cache: dict
    :return: Per-cohort metrics and the classes present in the last
        freshly computed cohort ([-1, -1, -1] marks a cohort that does
        not contain class_name).
    :rtype: list
    """
    dashboard_dataset = self.get_data().dataset
    true_y = dashboard_dataset.object_detection_true_y
    predicted_y = dashboard_dataset.object_detection_predicted_y
    # torchmetrics expects IoU thresholds normalized to [0, 1]
    normalized_iou_threshold = [iou_threshold / 100.0]
    all_cohort_metrics = []
    # initialized up-front so the final return cannot raise NameError
    # when every cohort is served from the cache (or there are none)
    cohort_classes = []
    for cohort_indices in selection_indexes:
        key = ','.join([str(cid) for cid in cohort_indices] +
                       [aggregate_method, class_name, str(iou_threshold)])
        if key in object_detection_cache:
            all_cohort_metrics.append(object_detection_cache[key])
            continue
        metric_OD = MeanAveragePrecision(
            class_metrics=True,
            iou_thresholds=normalized_iou_threshold,
            average=aggregate_method)
        true_y_cohort = [true_y[cohort_index] for cohort_index
                         in cohort_indices]
        predicted_y_cohort = [predicted_y[cohort_index] for cohort_index
                              in cohort_indices]
        # flatten per-image detections into parallel label/box/score
        # lists; slot 0 of each detection is the 1-based class id
        pred_boxes, pred_labels, pred_scores = [], [], []
        for pred_image in predicted_y_cohort:
            for pred_object in pred_image:
                pred_labels.append(int(pred_object[0]))
                pred_boxes.append(pred_object[1:5])
                pred_scores.append(pred_object[-1])
        gt_boxes, gt_labels = [], []
        for gt_image in true_y_cohort:
            for gt_object in gt_image:
                gt_labels.append(int(gt_object[0]))
                gt_boxes.append(gt_object[1:5])
        # creating the list of dictionaries for pred and gt
        cohort_pred = [
            dict(
                boxes=torch.tensor(pred_boxes),
                scores=torch.tensor(pred_scores),
                labels=torch.tensor(pred_labels),
            )
        ]
        cohort_gt = [
            dict(
                boxes=torch.tensor(gt_boxes),
                labels=torch.tensor(gt_labels),
            )
        ]
        # map label ids back to class names; not all classes need be
        # present in this cohort's predictions or ground truth
        classes = list(self._classes)
        cohort_classes = list(set([classes[i - 1]
                                   for i in pred_labels + gt_labels]))
        cohort_classes.sort(key=lambda cname: classes.index(cname))
        # to catch if the class is not in the cohort
        if class_name not in cohort_classes:
            all_cohort_metrics.append([-1, -1, -1])
        else:
            metric_OD.update(cohort_pred, cohort_gt)
            object_detection_values = metric_OD.compute()
            mAP = round(object_detection_values['map'].item(), 2)
            APs = [round(value, 2) for value in
                   object_detection_values['map_per_class']
                   .detach().tolist()]
            ARs = [round(value, 2) for value in
                   object_detection_values['mar_100_per_class']
                   .detach().tolist()]
            assert len(APs) == len(ARs) == len(cohort_classes)
            all_submetrics = [[mAP, APs[i], ARs[i]]
                              for i in range(len(APs))]
            all_cohort_metrics.append(all_submetrics)
    return [all_cohort_metrics, cohort_classes]
import json
from typing import Any, List, Optional
import jsonschema
import numpy as np
import pandas as pd
from ml_wrappers import wrap_model
from erroranalysis._internal.error_analyzer import ModelAnalyzer
from erroranalysis._internal.error_report import as_error_report
from responsibleai._tools.shared.state_directory_management import \
DirectoryManager
from responsibleai.managers.error_analysis_manager import \
ErrorAnalysisManager as BaseErrorAnalysisManager
from responsibleai.managers.error_analysis_manager import as_error_config
from responsibleai_vision.common.constants import (MLFlowSchemaLiterals,
ModelTask)
from responsibleai_vision.utils.image_reader import (
get_base64_string_from_path, is_automl_image_model)
from responsibleai_vision.utils.image_utils import get_images
# Name of the synthetic label column created for multilabel tasks when
# the per-class indicator columns are collapsed into a single column.
LABELS = 'labels'
def _concat_labels_column(dataset, target_column, classes):
"""Concatenate labels column for multilabel models.
:param dataset: The dataset including the label column.
:type dataset: pandas.DataFrame
:param target_column: The list of label columns in multilabel task.
:type target_column: list[str]
:param classes: The list of labels in multilabel task.
:type classes: list
:return: The labels column concatenated.
:rtype: list
"""
labels = []
for _, row in dataset[target_column].iterrows():
row_idxs = range(len(row))
pred_classes = [classes[i] for i in row_idxs if row[i]]
labels.append(','.join(pred_classes))
return labels
class WrappedIndexPredictorModel:
    """Wraps model that uses index to retrieve image data for making
    predictions."""

    def __init__(self, model, dataset, image_mode, transformations,
                 task_type, classes=None):
        """Initialize the WrappedIndexPredictorModel.

        :param model: The model to wrap.
        :type model: object
        :param dataset: The dataset to use for making predictions.
        :type dataset: pandas.DataFrame
        :param image_mode: The mode to open the image in.
            See pillow documentation for all modes:
            https://pillow.readthedocs.io/en/stable/handbook/concepts.html
        :type image_mode: str
        :param transformations: The transformations to apply to the image.
        :type transformations: object
        :param task_type: The task to run.
        :type task_type: str
        :param classes: The classes for the model.
        :type classes: list
        """
        self.model = model
        self.dataset = dataset
        self.classes = classes
        self.image_mode = image_mode
        self.transformations = transformations
        self.task_type = task_type
        # object detection predictions are not precomputed here
        if task_type == ModelTask.OBJECT_DETECTION:
            return
        if is_automl_image_model(self.model):
            # AutoML MLflow models take base64-encoded images
            test = np.array(
                self.dataset.iloc[:, 0].tolist()
            )
            test = pd.DataFrame(
                data=[
                    get_base64_string_from_path(img_path) for img_path in test
                ],
                columns=[MLFlowSchemaLiterals.INPUT_COLUMN_IMAGE],
            )
        else:
            test = get_images(self.dataset, self.image_mode,
                              self.transformations)
        self.predictions = self.model.predict(test)
        if task_type == ModelTask.MULTILABEL_IMAGE_CLASSIFICATION:
            predictions_joined = []
            for row in self.predictions:
                # get all labels where prediction is 1
                pred_labels = [i for i in range(len(row)) if row[i]]
                if self.classes is not None:
                    pred_labels = [self.classes[i] for i in pred_labels]
                else:
                    pred_labels = [str(i) for i in pred_labels]
                # concatenate all predicted labels into a single string
                predictions_joined.append(','.join(pred_labels))
            self.predictions = np.array(predictions_joined)
        # BUGFIX: previously stored as self.predict_proba, which
        # shadowed the predict_proba method below and made it
        # uncallable on instances; use a distinct attribute name.
        self.predict_proba_output = self.model.predict_proba(test)

    def predict(self, X):
        """Predict the class labels for the provided data.

        :param X: Data to predict the labels for.
        :type X: pandas.DataFrame
        :return: Predicted class labels.
        :rtype: list
        """
        # the dataframe index selects rows from the precomputed output
        index = X.index
        predictions = self.predictions[index]
        # multilabel predictions are already joined class-name strings
        if self.task_type == ModelTask.MULTILABEL_IMAGE_CLASSIFICATION:
            return predictions
        if self.classes is not None:
            predictions = [self.classes[y] for y in predictions]
        return predictions

    def predict_proba(self, X):
        """Predict the class probabilities for the provided data.

        :param X: Data to predict the probabilities for.
        :type X: pandas.DataFrame
        :return: Predicted class probabilities.
        :rtype: list[list]
        """
        index = X.index
        pred_proba = self.predict_proba_output[index]
        return pred_proba
class ErrorAnalysisManager(BaseErrorAnalysisManager):
    """Defines a wrapper class of Error Analysis for vision scenario."""

    def __init__(self, model: Any, dataset: pd.DataFrame,
                 ext_dataset: pd.DataFrame, target_column: str,
                 task_type: str,
                 image_mode: str, transformations: Any,
                 classes: Optional[List] = None,
                 categorical_features: Optional[List[str]] = None):
        """Creates an ErrorAnalysisManager object.

        :param model: The model to analyze errors on.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param dataset: The dataset including the label column.
        :type dataset: pandas.DataFrame
        :param ext_dataset: The dataset of extracted features including the
            label column.
        :type ext_dataset: pandas.DataFrame
        :param target_column: The name of the label column.
        :type target_column: str
        :param task_type: The task to run.
        :type task_type: str
        :param image_mode: The mode to open the image in.
            See pillow documentation for all modes:
            https://pillow.readthedocs.io/en/stable/handbook/concepts.html
        :type image_mode: str
        :param transformations: The transformations to apply to the image.
        :type transformations: object
        :param classes: Class names as a list of strings.
            The order of the class names should match that of the model
            output. Only required if analyzing a classifier.
        :type classes: list
        :param categorical_features: The categorical feature names.
        :type categorical_features: list[str]
        """
        index_classes = classes
        is_od = task_type == ModelTask.OBJECT_DETECTION
        # multilabel (list of target columns): collapse the indicator
        # columns into a single comma-separated LABELS column
        if isinstance(target_column, list) and not is_od:
            # create copy of dataset as we will make modifications to it
            dataset = dataset.copy()
            index_classes = target_column
            labels = _concat_labels_column(dataset, target_column,
                                           index_classes)
            dataset[LABELS] = labels
            # NOTE(review): ext_dataset is modified in place (LABELS
            # added, target columns dropped) — callers see the change
            ext_dataset[LABELS] = dataset[LABELS]
            dataset.drop(columns=target_column, inplace=True)
            ext_dataset.drop(columns=target_column, inplace=True)
            target_column = LABELS
        index_predictor = ErrorAnalysisManager._create_index_predictor(
            model, dataset, target_column, task_type, image_mode,
            transformations, index_classes)
        super(ErrorAnalysisManager, self).__init__(
            index_predictor, ext_dataset, target_column,
            classes, categorical_features)

    def compute(self, **kwargs):
        """Compute the error analysis data.

        :param kwargs: The keyword arguments to pass to the compute method.
            Note that this method does not take any arguments currently.
        :type kwargs: dict
        """
        super(ErrorAnalysisManager, self).compute()

    @staticmethod
    def _create_index_predictor(model, dataset, target_column, task_type,
                                image_mode, transformations, classes=None):
        """Creates a wrapped predictor that uses index to retrieve text data.

        :param model: The model to analyze errors on.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param dataset: The dataset including the label column.
        :type dataset: pandas.DataFrame
        :param target_column: The name of the label column.
        :type target_column: str
        :param task_type: The task to run.
        :type task_type: str
        :param image_mode: The mode to open the image in.
            See pillow documentation for all modes:
            https://pillow.readthedocs.io/en/stable/handbook/concepts.html
        :type image_mode: str
        :param transformations: The transformations to apply to the image.
        :type transformations: Any
        :param classes: Class names as a list of strings.
            The order of the class names should match that of the model
            output.
        :type classes: list
        :return: A wrapped predictor that uses index to retrieve text data.
        :rtype: WrappedIndexPredictorModel
        """
        # the predictor only needs the features, not the label column
        dataset = dataset.drop(columns=[target_column])
        index_predictor = WrappedIndexPredictorModel(
            model, dataset, image_mode, transformations, task_type, classes)
        return index_predictor

    @staticmethod
    def _load(path, rai_insights):
        """Load the ErrorAnalysisManager from the given path.

        :param path: The directory path to load the ErrorAnalysisManager from.
        :type path: str
        :param rai_insights: The loaded parent RAIInsights.
        :type rai_insights: RAIInsights
        :return: The ErrorAnalysisManager manager after loading.
        :rtype: ErrorAnalysisManager
        """
        # create the ErrorAnalysisManager without any properties using
        # the __new__ function, similar to pickle
        inst = ErrorAnalysisManager.__new__(ErrorAnalysisManager)
        ea_config_list = []
        ea_report_list = []
        all_ea_dirs = DirectoryManager.list_sub_directories(path)
        for ea_dir in all_ea_dirs:
            directory_manager = DirectoryManager(
                parent_directory_path=path,
                sub_directory_name=ea_dir)
            config_path = (directory_manager.get_config_directory() /
                           'config.json')
            with open(config_path, 'r') as file:
                ea_config = json.load(file, object_hook=as_error_config)
                ea_config_list.append(ea_config)
            report_path = (directory_manager.get_data_directory() /
                           'report.json')
            with open(report_path, 'r') as file:
                ea_report = json.load(file, object_hook=as_error_report)
                # Validate the serialized output against schema
                schema = ErrorAnalysisManager._get_error_analysis_schema()
                jsonschema.validate(
                    json.loads(ea_report.to_json()), schema)
            ea_report_list.append(ea_report)
        # attributes are written through __dict__ since __init__ was
        # bypassed above
        inst.__dict__['_ea_report_list'] = ea_report_list
        inst.__dict__['_ea_config_list'] = ea_config_list
        feature_metadata = rai_insights._feature_metadata
        categorical_features = feature_metadata.categorical_features
        inst.__dict__['_categorical_features'] = categorical_features
        target_column = rai_insights.target_column
        true_y = rai_insights._ext_test_df[target_column]
        if isinstance(target_column, list):
            dropped_cols = target_column
        else:
            dropped_cols = [target_column]
        dataset = rai_insights._ext_test_df.drop(columns=dropped_cols)
        inst.__dict__['_dataset'] = dataset
        feature_names = list(dataset.columns)
        inst.__dict__['_feature_names'] = feature_names
        task_type = rai_insights.task_type
        wrapped_model = wrap_model(rai_insights.model, dataset,
                                   rai_insights.task_type,
                                   classes=rai_insights._classes,
                                   device=rai_insights.device)
        inst.__dict__['_task_type'] = task_type
        index_classes = rai_insights._classes
        is_od = task_type == ModelTask.OBJECT_DETECTION
        index_dataset = rai_insights.test
        # mirror the multilabel handling done in __init__
        if isinstance(target_column, list) and not is_od:
            # create copy of dataset as we will make modifications to it
            index_dataset = index_dataset.copy()
            index_classes = target_column
            labels = _concat_labels_column(index_dataset, target_column,
                                           index_classes)
            index_dataset.drop(columns=target_column, inplace=True)
            index_dataset[LABELS] = labels
            target_column = LABELS
            true_y = index_dataset[target_column]
        inst.__dict__['_true_y'] = true_y
        index_predictor = ErrorAnalysisManager._create_index_predictor(
            wrapped_model, index_dataset, target_column,
            task_type, rai_insights.image_mode,
            rai_insights._transformations,
            rai_insights._classes)
        inst.__dict__['_analyzer'] = ModelAnalyzer(index_predictor,
                                                   dataset,
                                                   true_y,
                                                   feature_names,
                                                   categorical_features)
        return inst
import base64
from io import BytesIO
from typing import Any, Tuple, Union
import requests
from numpy import asarray
from PIL import Image
from responsibleai_vision.common.constants import (AutoMLImagesModelIdentifier,
CommonTags)
def get_image_from_path(image_path, image_mode):
    """Get image from path.

    :param image_path: The path to the image (local path or http(s) URL).
    :type image_path: str
    :param image_mode: The mode to open the image in.
        See pillow documentation for all modes:
        https://pillow.readthedocs.io/en/stable/handbook/concepts.html
    :type image_mode: str
    :return: The image as a numpy array.
    :rtype: numpy.ndarray
    """
    # remote images are fetched into an in-memory buffer first
    source = image_path
    if image_path.startswith(("http://", "https://")):
        response = requests.get(image_path)
        source = BytesIO(response.content)
    with Image.open(source) as img:
        if image_mode is not None:
            img = img.convert(image_mode)
        pixels = asarray(img)
    return pixels
def get_base64_string_from_path(img_path: str,
                                return_image_size: bool = False) \
        -> Union[str, Tuple[str, Tuple[int, int]]]:
    """Load and convert pillow image to base64-encoded image

    :param img_path: image path
    :type img_path: str
    :param return_image_size: true if image size should also be returned
    :type return_image_size: bool
    :return: base64-encoded image OR base64-encoded image and image size
    :rtype: Union[str, Tuple[str, Tuple[int, int]]]
    """
    try:
        img = Image.open(img_path)
    except Exception as e:
        # NOTE(review): broad fallback — any open failure is treated as
        # "path is a URL" and fetched; the download is written to a
        # file literally named "tempfile" in the current working
        # directory and never cleaned up — consider tempfile module
        print("file not found", str(e))
        import urllib.request
        urllib.request.urlretrieve(img_path, "tempfile")
        img = Image.open("tempfile")
    # re-encode in the image's own format into an in-memory buffer
    imgio = BytesIO()
    img.save(imgio, img.format)
    img_str = base64.b64encode(imgio.getvalue())
    if return_image_size:
        return img_str.decode(CommonTags.IMAGE_DECODE_UTF_FORMAT), img.size
    return img_str.decode(CommonTags.IMAGE_DECODE_UTF_FORMAT)
def is_automl_image_model(model: Any) -> bool:
    """Check whether the model is automl images mlflow type

    :param model: Model object
    :type model: supported model types
    :return: True if automl model type else False
    :rtype: bool
    """
    # the wrapped AutoML MLflow models are recognized purely by the
    # textual name of their type
    automl_suffixes = (
        AutoMLImagesModelIdentifier.AUTOML_IMAGE_CLASSIFICATION_MODEL,
        AutoMLImagesModelIdentifier.AUTOML_OBJECT_DETECTION_MODEL,
    )
    return str(type(model)).endswith(automl_suffixes)
from typing import List, Optional
import pandas as pd
from tqdm import tqdm
from responsibleai_vision.utils.image_reader import get_image_from_path
def extract_features(image_dataset: pd.DataFrame,
                     target_column: str, task_type: str,
                     image_mode: str = None,
                     dropped_features: Optional[List[str]] = None):
    '''Extract tabular data features from the image dataset.

    :param image_dataset: A pandas dataframe containing the image data.
    :type image_dataset: pandas.DataFrame
    :param target_column: The name of the label column or list of columns.
        This is a list of columns for multilabel models.
    :type target_column: str or list[str]
    :param task_type: The type of task to be performed.
    :type task_type: str
    :param image_mode: The mode to open the image in.
        See pillow documentation for all modes:
        https://pillow.readthedocs.io/en/stable/handbook/concepts.html
    :type image_mode: str
    :param dropped_features: The list of features to drop from the dataset.
    :type dropped_features: list[str]
    :return: The list of extracted features and the feature names.
    :rtype: list, list
    '''
    results = []
    feature_names = ["mean_pixel_value"]
    column_names = image_dataset.columns
    has_dropped_features = dropped_features is not None
    # column 0 holds the image; the label column(s) follow; metadata
    # columns start right after the label(s)
    start_meta_index = 2
    if isinstance(target_column, list):
        start_meta_index = len(target_column) + 1
    for j in range(start_meta_index, image_dataset.shape[1]):
        if has_dropped_features and column_names[j] in dropped_features:
            continue
        feature_names.append(column_names[j])
    for i in tqdm(range(image_dataset.shape[0])):
        # scalar iloc[i, j] instead of chained iloc[i][j]: avoids
        # materializing a full row Series per cell and the deprecated
        # integer __getitem__ fallback on label-indexed Series
        image = image_dataset.iloc[i, 0]
        if isinstance(image, str):
            image = get_image_from_path(image, image_mode)
        mean_pixel_value = image.mean()
        row_feature_values = [mean_pixel_value]
        # append all features other than target column and label
        for j in range(start_meta_index, image_dataset.shape[1]):
            if has_dropped_features and column_names[j] in dropped_features:
                continue
            row_feature_values.append(image_dataset.iloc[i, j])
        results.append(row_feature_values)
    return results, feature_names
from enum import Enum
class ModelTask(str, Enum):
    """Provide model task constants.
    Can be 'image_classification', 'multilabel_image_classification',
    'object_detection' or 'unknown'.
    """

    IMAGE_CLASSIFICATION = 'image_classification'
    MULTILABEL_IMAGE_CLASSIFICATION = 'multilabel_image_classification'
    OBJECT_DETECTION = 'object_detection'
    UNKNOWN = 'unknown'
class ImageColumns(str, Enum):
    """Provide constants related to the input image dataframe columns.
    Can be 'image_url', 'image', 'label' or 'image_details'.
    """

    IMAGE_URL = 'image_url'
    IMAGE = 'image'
    LABEL = 'label'
    IMAGE_DETAILS = 'image_details'
class ExplainabilityLiterals:
    """Parameters for explainability method names."""

    MODEL_EXPLAINABILITY = 'model_explainability'
    XAI_PARAMETERS = 'xai_parameters'
    XAI_ALGORITHM = 'xai_algorithm'
    # supported explanation algorithm names
    SHAP_METHOD_NAME = 'shap'
    XRAI_METHOD_NAME = 'xrai'
    INTEGRATEDGRADIENTS_METHOD_NAME = 'integrated_gradients'
    GUIDEDGRADCAM_METHOD_NAME = 'guided_gradcam'
    GUIDEDBACKPROP_METHOD_NAME = 'guided_backprop'
    CONFIDENCE_SCORE_THRESHOLD_MULTILABEL = (
        'confidence_score_threshold_multilabel'
    )
    N_STEPS = "n_steps"
    APPROXIMATION_METHOD = "approximation_method"
    XRAI_FAST = "xrai_fast"
    # the set of keyword-argument names accepted for XAI configuration
    XAI_ARGS_GROUP = [
        XAI_ALGORITHM,
        N_STEPS,
        APPROXIMATION_METHOD,
        XRAI_FAST,
        CONFIDENCE_SCORE_THRESHOLD_MULTILABEL,
    ]
    SHAP = 'shap'
class ExplainabilityDefaults:
    """DEFAULT values for explainability parameters."""

    # Explainability is opt-in by default.
    MODEL_EXPLAINABILITY = False
    # Algorithm used when the caller does not specify one.
    XAI_ALGORITHM = ExplainabilityLiterals.GUIDEDGRADCAM_METHOD_NAME
    OUTPUT_VISUALIZATIONS = True
    OUTPUT_ATTRIBUTIONS = False
    # Minimum confidence for a label to be considered in the
    # multilabel case.
    CONFIDENCE_SCORE_THRESHOLD_MULTILABEL = 0.5
    # NOTE(review): presumably tuning knobs for the masking-based
    # explainer (evaluation budget, mask resolution, mask count) —
    # confirm against where these defaults are consumed.
    DEFAULT_MAX_EVALS = 100
    DEFAULT_MASK_RES = 4
    DEFAULT_NUM_MASKS = 50
class XAIPredictionLiterals:
    """Strings that will be keys in the output json during prediction."""

    VISUALIZATIONS_KEY_NAME = 'visualizations'
    ATTRIBUTIONS_KEY_NAME = 'attributions'
class MLFlowSchemaLiterals:
    """MLFlow model signature related schema."""

    # Key carrying the base64-encoded image payload.
    INPUT_IMAGE_KEY = 'image_base64'
    INPUT_COLUMN_IMAGE = 'image'
    INPUT_IMAGE_SIZE = 'image_size'
class CommonTags:
    """Common constants."""

    # Codec name used when encoding/decoding image byte strings.
    IMAGE_DECODE_UTF_FORMAT = 'utf-8'
class AutoMLImagesModelIdentifier:
    """AutoML model object types."""

    # NOTE(review): both values end with "'>" — presumably they are
    # matched against the tail of str(type(model)), e.g.
    # "<class '...WrappedMlflowAutomlImagesClassificationModel'>".
    # Confirm against the call site before changing.
    AUTOML_IMAGE_CLASSIFICATION_MODEL = (
        "WrappedMlflowAutomlImagesClassificationModel'>"
    )
    AUTOML_OBJECT_DETECTION_MODEL = (
        "WrappedMlflowAutomlObjectDetectionModel'>"
    )









# Responsible AI Toolbox
Responsible AI is an approach to assessing, developing, and deploying AI systems in a safe, trustworthy, and ethical manner, and to taking responsible decisions and actions.
Responsible AI Toolbox is a suite of tools providing a collection of model and data exploration and assessment user interfaces and libraries that enable a better understanding of AI systems. These interfaces and libraries empower developers and stakeholders of AI systems to develop and monitor AI more responsibly, and take better data-driven actions.
<p align="center">
<img src="https://raw.githubusercontent.com/microsoft/responsible-ai-widgets/main/img/responsible-ai-toolbox.png" alt="ResponsibleAIToolboxOverview" width="750"/>
The Toolbox consists of three repositories:
| Repository| Tools Covered |
|--|--|
| [Responsible-AI-Toolbox Repository](https://github.com/microsoft/responsible-ai-toolbox) (Here) |This repository contains four visualization widgets for model assessment and decision making:<br>1. [Responsible AI dashboard](https://github.com/microsoft/responsible-ai-toolbox#introducing-responsible-ai-dashboard), a single pane of glass bringing together several mature Responsible AI tools from the toolbox for a holistic responsible assessment and debugging of models and making informed business decisions. With this dashboard, you can identify model errors, diagnose why those errors are happening, and mitigate them. Moreover, the causal decision-making capabilities provide actionable insights to your stakeholders and customers.<br>2. [Error Analysis dashboard](https://github.com/microsoft/responsible-ai-toolbox/blob/main/docs/erroranalysis-dashboard-README.md), for identifying model errors and discovering cohorts of data for which the model underperforms. <br>3. [Interpretability dashboard](https://github.com/microsoft/responsible-ai-toolbox/blob/main/docs/explanation-dashboard-README.md), for understanding model predictions. This dashboard is powered by InterpretML.<br>4. [Fairness dashboard](https://github.com/microsoft/responsible-ai-toolbox/blob/main/docs/fairness-dashboard-README.md), for understanding model’s fairness issues using various group-fairness metrics across sensitive features and cohorts. This dashboard is powered by Fairlearn.
| [Responsible-AI-Toolbox-Mitigations Repository](https://github.com/microsoft/responsible-ai-toolbox-mitigations) | The Responsible AI Mitigations Library helps AI practitioners explore different measurements and mitigation steps that may be most appropriate when the model underperforms for a given data cohort. The library currently has two modules: <br>1. DataProcessing, which offers mitigation techniques for improving model performance for specific cohorts. <br>2. DataBalanceAnalysis, which provides metrics for diagnosing errors that originate from data imbalance either on class labels or feature values. <br> 3. Cohort: provides classes for handling and managing cohorts, which allows the creation of custom pipelines for each cohort in an easy and intuitive interface. The module also provides techniques for learning different decoupled estimators (models) for different cohorts and combining them in a way that optimizes different definitions of group fairness.|
[Responsible-AI-Tracker Repository](https://github.com/microsoft/responsible-ai-toolbox-tracker) |Responsible AI Toolbox Tracker is a JupyterLab extension for managing, tracking, and comparing results of machine learning experiments for model improvement. Using this extension, users can view models, code, and visualization artifacts within the same framework enabling therefore fast model iteration and evaluation processes. Main functionalities include: <br>1. Managing and linking model improvement artifacts<br> 2. Disaggregated model evaluation and comparisons<br>3. Integration with the Responsible AI Mitigations library<br>4. Integration with mlflow|
[Responsible-AI-Toolbox-GenBit Repository](https://github.com/microsoft/responsible-ai-toolbox-genbit) | The Responsible AI Gender Bias (GenBit) Library helps AI practitioners measure gender bias in Natural Language Processing (NLP) datasets. The main goal of GenBit is to analyze your text corpora and compute metrics that give insights into the gender bias present in a corpus.|
## Introducing Responsible AI dashboard
[Responsible AI dashboard](https://github.com/microsoft/responsible-ai-toolbox/blob/main/notebooks/responsibleaidashboard/tour.ipynb) is a single pane of glass, enabling you to easily flow through different stages of model debugging and decision-making. This customizable experience can be taken in a multitude of directions, from analyzing the model or data holistically, to conducting a deep dive or comparison on cohorts of interest, to explaining and perturbing model predictions for individual instances, and to informing users on business decisions and actions.
<p align="center">
<img src="https://raw.githubusercontent.com/microsoft/responsible-ai-widgets/main/img/responsible-ai-dashboard.png" alt="ResponsibleAIDashboard" width="750"/>
In order to achieve these capabilities, the dashboard integrates together ideas and technologies from several open-source toolkits in the areas of
- <b>Error Analysis</b> powered by [Error Analysis](https://github.com/microsoft/responsible-ai-widgets/blob/main/docs/erroranalysis-dashboard-README.md), which identifies cohorts of data with higher error rate than the overall benchmark. These discrepancies might occur when the system or model underperforms for specific demographic groups or infrequently observed input conditions in the training data.
- <b>Fairness Assessment</b> powered by [Fairlearn](https://github.com/fairlearn/fairlearn), which identifies which groups of people may be disproportionately negatively impacted by an AI system and in what ways.
- <b>Model Interpretability</b> powered by [InterpretML](https://github.com/interpretml/interpret-community), which explains blackbox models, helping users understand their model's global behavior, or the reasons behind individual predictions.
- <b>Counterfactual Analysis</b> powered by [DiCE](https://github.com/interpretml/DiCE), which shows feature-perturbed versions of the same datapoint who would have received a different prediction outcome, e.g., Taylor's loan has been rejected by the model. But they would have received the loan if their income was higher by $10,000.
- <b>Causal Analysis</b> powered by [EconML](https://github.com/microsoft/EconML), which focuses on answering What If-style questions to apply data-driven decision-making – how would revenue be affected if a corporation pursues a new pricing strategy? Would a new medication improve a patient’s condition, all else equal?
- <b>Data Balance</b> powered by [Responsible AI](https://github.com/microsoft/responsible-ai-toolbox/blob/main/docs/databalance-README.md), which helps users gain an overall understanding of their data, identify features receiving the positive outcome more than others, and visualize feature distributions.
Responsible AI dashboard is designed to achieve the following goals:
- To help further accelerate engineering processes in machine learning by enabling practitioners to design customizable workflows and tailor Responsible AI dashboards that best fit with their model assessment and data-driven decision making scenarios.
- To help model developers create end to end and fluid debugging experiences and navigate seamlessly through error identification and diagnosis by using interactive visualizations that identify errors, inspect the data, generate global and local explanations models, and potentially inspect problematic examples.
- To help business stakeholders explore causal relationships in the data and take informed decisions in the real world.
This repository contains the Jupyter notebooks with examples to showcase how to use this widget. Get started [here](https://github.com/microsoft/responsible-ai-toolbox/blob/main/notebooks/responsibleaidashboard/getting-started.ipynb).
### Installation
Use the following pip command to install the Responsible AI Toolbox.
If running in jupyter, please make sure to restart the jupyter kernel after installing.
```
pip install raiwidgets
```
### Responsible AI dashboard Customization
The Responsible AI Toolbox’s strength lies in its customizability. It empowers users to design tailored, end-to-end model debugging and decision-making workflows that address their particular needs. Need some inspiration? Here are some examples of how Toolbox components can be put together to analyze scenarios in different ways:
Please note that model overview (including fairness analysis) and data explorer components are activated by default!
| Responsible AI Dashboard Flow| Use Case |
|--|--|
| Model Overview -> Error Analysis -> Data Explorer | To identify model errors and diagnose them by understanding the underlying data distribution
| Model Overview -> Fairness Assessment -> Data Explorer | To identify model fairness issues and diagnose them by understanding the underlying data distribution
| Model Overview -> Error Analysis -> Counterfactuals Analysis and What-If | To diagnose errors in individual instances with counterfactual analysis (minimum change to lead to a different model prediction)
| Model Overview -> Data Explorer -> Data Balance | To understand the root cause of errors and fairness issues introduced via data imbalances or lack of representation of a particular data cohort
| Model Overview -> Interpretability | To diagnose model errors through understanding how the model has made its predictions
| Data Explorer -> Causal Inference | To distinguish between correlations and causations in the data or decide the best treatments to apply to see a positive outcome
| Interpretability -> Causal Inference | To learn whether the factors that model has used for decision making has any causal effect on the real-world outcome.
| Data Explorer -> Counterfactuals Analysis and What-If | To address customer questions about what they can do next time to get a different outcome from an AI.
| Data Explorer -> Data Balance | To gain an overall understanding of the data, identify features receiving the positive outcome more than others, and visualize feature distributions
### Useful Links
- [Take a tour of Responsible AI Dashboard](https://github.com/microsoft/responsible-ai-toolbox/blob/main/notebooks/responsibleaidashboard/tour.ipynb)
- [Get started](https://github.com/microsoft/responsible-ai-toolbox/blob/main/notebooks/responsibleaidashboard/getting-started.ipynb)
Model Debugging Examples:
- [Try the tool: model debugging of a census income prediction model (classification)](https://github.com/microsoft/responsible-ai-toolbox/tree/main/notebooks/responsibleaidashboard/responsibleaidashboard-census-classification-model-debugging.ipynb)
- [Try the tool: model debugging of a housing price prediction model (classification)](https://github.com/microsoft/responsible-ai-toolbox/tree/main/notebooks/responsibleaidashboard/responsibleaidashboard-housing-classification-model-debugging.ipynb)
- [Try the tool: model debugging of a diabetes progression prediction model (regression)](https://github.com/microsoft/responsible-ai-toolbox/tree/main/notebooks/responsibleaidashboard/responsibleaidashboard-diabetes-regression-model-debugging.ipynb)
- [Try the tool: model debugging of a fridge object detection model](https://github.com/microsoft/responsible-ai-toolbox/tree/main/notebooks/responsibleaidashboard/responsibleaidashboard-fridge-object-detection-model-debugging.ipynb)
Responsible Decision Making Examples:
- [Try the tool: make decisions for house improvements](https://github.com/microsoft/responsible-ai-toolbox/tree/main/notebooks/responsibleaidashboard/responsibleaidashboard-housing-decision-making.ipynb)
- [Try the tool: provide recommendations to patients using diabetes data](https://github.com/microsoft/responsible-ai-toolbox/tree/main/notebooks/responsibleaidashboard/responsibleaidashboard-diabetes-decision-making.ipynb)
- [Try the tool: model debugging of a fridge image classification model](https://github.com/microsoft/responsible-ai-toolbox/tree/main/notebooks/responsibleaidashboard/responsibleaidashboard-fridge-image-classification-model-debugging.ipynb)
- [Try the tool: model debugging of a fridge multilabel image classification model](https://github.com/microsoft/responsible-ai-toolbox/tree/main/notebooks/responsibleaidashboard/responsibleaidashboard-fridge-multilabel-image-classification-model-debugging.ipynb)
- [Try the tool: model debugging of a fridge object detection model](https://github.com/microsoft/responsible-ai-toolbox/blob/main/notebooks/responsibleaidashboard/responsibleaidashboard-fridge-object-detection-model-debugging.ipynb)
## Supported Models
This Responsible AI Toolbox API supports models that are trained on datasets in Python `numpy.ndarray`, `pandas.DataFrame`, `iml.datatypes.DenseData`, or `scipy.sparse.csr_matrix` format.
The explanation functions of [Interpret-Community](https://github.com/interpretml/interpret-community) accept both models and pipelines as input as long as the model or pipeline implements a `predict` or `predict_proba` function that conforms to the Scikit convention. If not compatible, you can wrap your model's prediction function into a wrapper function that transforms the output into the format that is supported (predict or predict_proba of Scikit), and pass that wrapper function to your selected interpretability techniques.
If a pipeline script is provided, the explanation function assumes that the running pipeline script returns a prediction. The repository also supports models trained via **PyTorch**, **TensorFlow**, and **Keras** deep learning frameworks.
## Other Use Cases
Tools within the Responsible AI Toolbox can also be used with AI models offered as APIs by providers such as [Azure Cognitive Services](https://azure.microsoft.com/en-us/services/cognitive-services/). To see example use cases, see the folders below:
- [Cognitive Services Speech to Text Fairness testing](https://github.com/microsoft/responsible-ai-toolbox/tree/main/notebooks/cognitive-services-examples/speech-to-text)
- [Cognitive Services Face Verification Fairness testing](https://github.com/microsoft/responsible-ai-toolbox/tree/main/notebooks/cognitive-services-examples/face-verification)
## Maintainers
- [Ke Xu](https://github.com/KeXu444)
- [Roman Lutz](https://github.com/romanlutz)
- [Ilya Matiach](https://github.com/imatiach-msft)
- [Gaurav Gupta](https://github.com/gaugup)
- [Vinutha Karanth](https://github.com/vinuthakaranth)
- [Tong Yu](https://github.com/tongyu-microsoft)
- [Ruby Zhu](https://github.com/RubyZ10)
- [Mehrnoosh Sameki](https://github.com/mesameki)
- [Hannah Westra](https://github.com/hawestra)
- [Ziqi Ma](https://github.com/ziqi-ma)
- [Kin Chan](https://github.com/kicha0)
| /responsibleai-0.30.0.tar.gz/responsibleai-0.30.0/README.md | 0.670608 | 0.888227 | README.md | pypi |
Analysis of Word Embedding Bias Metrics
=======================================
.. note::
This page is still work-in-progress.
There are two common ways to measure bias in word embedding intrinsically,
one is given by Tolga et al. work, and the second is called WEAT.
Both of the two approaches use the same building block:
cosine similarity between two word vectors,
but it seems that they capture bias differently.
For example, after a gender debiasing of Word2Vec model
using Tolga's methods, the gender-bias which is measured with WEAT score
is not eliminated. We might hypothesize that WEAT score measures bias
in a more profound sense.
In this page, we aim to bridge the gap between the two measures.
We will formulate the WEAT score using Tolga's terminology,
and observe its power.
We assume that you are familiar with these two papers:
- Bolukbasi, T., Chang, K. W., Zou, J. Y., Saligrama, V.,
& Kalai, A. T. (2016).
`Man is to computer programmer as woman is to homemaker?
debiasing word embeddings <https://arxiv.org/abs/1607.06520>`_.
in Advances in neural information processing systems
(pp. 4349-4357).
- Caliskan, A., Bryson, J. J., & Narayanan, A. (2017).
`Semantics derived automatically
from language corpora contain human-like biases
<http://opus.bath.ac.uk/55288/>`_.
Science, 356(6334), 183-186.
Let's start with the definition of the WEAT score.
Note that we will use "word", "vector" and "word vector" interchangeably.
Let :math:`X` and :math:`Y` be two sets of target words of equal size,
and :math:`A` and :math:`B` two sets of attribute words of equal size.
Let :math:`cos(\vec a, \vec b)` denote the cosine
of the angle between vector :math:`\vec a` and :math:`\vec b`.
We will assume the word embedding is normalized,
i.e., all its vectors have a norm equal to one.
Therefore, the cosine similarity between two word vectors
is the same as the inner product of these vectors
:math:`\langle\vec a, \vec b\rangle`.
The WEAT test statistic is
.. math::
s(X, Y, A, B)
= \sum\limits_{\vec x \in X}{s(\vec x, A, B)} - \sum\limits_{\vec y \in Y}{s(\vec y, A, B)}
where
.. math::
s(w, A, B)
= mean_{\vec a \in A}cos(\vec w, \vec a) - mean_{\vec b \in B}cos(\vec w, \vec b)
Let :math:`N = |A| = |B|`. Then we can rewrite :math:`s(w, A, B)`:
.. math::
:nowrap:
\begin{eqnarray}
s(w, A, B) & = & mean_{\vec a \in A}cos(\vec w, \vec a) - mean_{\vec b \in B}cos(\vec w, \vec b) \\
& = & mean_{\vec a \in A}\langle\vec w, \vec a\rangle - mean_{\vec b \in B}\langle\vec w, \vec b\rangle \\
& = & \frac{1}{N} \sum\limits_{\vec a \in A} \langle\vec w, \vec a\rangle - \frac{1}{N} \sum\limits_{\vec b \in B} \langle\vec w, \vec b\rangle
\end{eqnarray}
Using the linearity of the inner product:
.. math::
:nowrap:
\begin{eqnarray}
& = & \frac{1}{N} \langle\vec w, \sum\limits_{\vec a \in A} \vec a\rangle - \frac{1}{N} \langle\vec w, \sum\limits_{\vec b \in B} \vec b\rangle \\
& = & \frac{1}{N} \langle\vec w, \sum\limits_{\vec a \in A} \vec a - \sum\limits_{\vec b \in B} \vec b\rangle
\end{eqnarray}
Let's define:
.. math::
\vec d_{AB} = \sum\limits_{\vec a \in A} \vec a - \sum\limits_{\vec b \in B} \vec b
And then:
.. math::
s(w, A, B) = \frac{1}{N} \langle\vec w, \vec d_{AB}\rangle
So :math:`s(w, A, B)` measures the association between
a word :math:`\vec w` and a direction :math:`\vec d_{AB}`
which is defined by two sets of words :math:`A` and :math:`B`.
This is a key point, we formulated the low-level part of WEAT
using the notion of a direction in a word embedding.
Tolga's paper suggests three ways to come up with a direction
in a word embedding between two concepts:
1. Have two words, one for each end, :math:`\vec a` and :math:`\vec b`,
and substruct them to get :math:`\vec d = \vec a - \vec b`.
Then, normalize :math:`\vec d`.
For example, :math:`\overrightarrow{she} - \overrightarrow{he}`.
2. Have two sets of words, one for each end,
:math:`\vec A` and :math:`\vec B`,
calculate the normalized sum of each group,
then subtract the sums and normalized again.
Up to a factor, this is precisely :math:`d_{AB}`!
Nevertheless, this factor might matter,
as it changes for every check in the p-value calculation
using the permutation test.
This will be examined experimentally in the future.
3. The last method has a stronger assumption,
it requires having a set of pairs of words,
one from the concept :math:`A` and the other from the concept :math:`B`.
For example, she-he and mother-father.
We won't describe the method here.
Note that this is the method that Tolga's paper use
to define the gender direction for debiasing.
The first method is basically the same as the second method,
when :math:`A` and :math:`B` contain each only one word vector.
Now, let's move forward to rewrite the WEAT score itself:
.. math::
:nowrap:
\begin{eqnarray}
s(X, Y, A, B) & = & \sum\limits_{\vec x \in X}{s(\vec x, A, B)} - \sum\limits_{\vec y \in Y}{s(\vec y, A, B)} \\
& = & \frac{1}{N}\sum\limits_{\vec x \in X}\langle\vec x, \vec d_{AB}\rangle - \frac{1}{N}\sum\limits_{\vec y \in Y}\langle\vec y, \vec d_{AB}\rangle \\
& = & \frac{1}{N}\langle\sum\limits_{\vec x \in X} \vec x, \vec d_{AB}\rangle - \frac{1}{N}\langle\sum\limits_{\vec y \in Y} \vec y, \vec d_{AB}\rangle \\
& = & \frac{1}{N}\langle\sum\limits_{\vec x \in X} \vec x - \sum\limits_{\vec y \in Y} \vec y, \vec d_{AB}\rangle \\
& = & \frac{1}{N}\langle\vec d_{XY}, \vec d_{AB}\rangle
\end{eqnarray}
This formulation allows us to see what the WEAT score is really about:
measuring the association between two directions.
Each direction is defined by two concepts ends,
such as Female-Male, Science-Art, Pleasent-Unpleasant.
It explains why WEAT seems like a more deeper measure of bias,
In the WEAT score, the direction is defined by two sets of words,
one for each end. As mentioned above, Tolga's paper
suggests two more methods for specifying the direction.
Note that the WEAT score is scaled only with the size of
:math:`A` and :math:`B`,
because :math:`s(X, Y, A, B)` only sums over :math:`X` and :math:`Y`
and doesn't use the mean, in contrast to :math:`s(\vec w, A, B)`.
Besides, even though the perspective of association between
two directions may help us to understand better what WEAT score measure,
the original formulation matters to compute the p-value.
Tolga's direct bias works a bit differently. Given a bias direction
:math:`\vec d`
and a set of neutral words :math:`W`, then:
.. math::
DirectBias(\vec d, W) = \frac{1}{|W|}\sum\limits_{\vec w \in W} |\langle \vec d, \vec w \rangle|
The bias direction :math:`\vec d` can be defined with
one of the three methods described above,
including the WEAT flavored one as :math:`\vec d_{AB}`
with two word sets :math:`A` and :math:`B`.
The direct bias definition lacks the second direction,
and it is indeed easier to debias, as it requires removing the
:math:`\vec d` part from all the neutral words in the vocabulary.
In Tolga's paper there is another metric - indirect bias - that takes
two words (:math:`\vec v, \vec u`) and the (bias) direction (:math:`\vec d`),
and measures the shared proportion of the two word projections
on the bias direction:
.. math::
IndirectBias(\vec d, \vec v, \vec w) = \frac{\langle \vec d, \vec v \rangle \langle \vec d, \vec w \rangle}{\langle \vec v, \vec w \rangle}
Therefore, we can formalize the WEAT score as a measure
of association between two concept directions in a word embedding.
Practically, the WEAT score uses two sets of words to define a direction,
while in Tolga's paper, there are an additional two more methods.
| /responsibly-0.1.3.tar.gz/responsibly-0.1.3/docs/word-embedding-bias-metric-analysis.rst | 0.959345 | 0.969728 | word-embedding-bias-metric-analysis.rst | pypi |
from responsive.subject import Subject
from responsive.wrapper import DictWrapper, ListWrapper
def __make_responsive_for_list(root: object, parent: list) -> None:
    """Recursively make the entries of a list responsive, in place.

    Every dict, list or user-defined object found in *parent* is
    replaced by a wrapper that notifies *root* about changes.

    Args:
        root (object): the main object that should be responsive
        parent (list): the current list in the hierarchy
    """
    for position, entry in enumerate(parent):
        entry_type = type(entry)
        if entry_type.__module__ != "builtins" or isinstance(entry, dict):
            # Dicts and instances of user-defined classes are wrapped
            # attribute-wise; recurse into them before installing.
            wrapper = DictWrapper(entry, make_responsive, root=root)
            __make_responsive_for_dict(root, entry)
            wrapper.add_observer(root)
            parent[position] = wrapper
        elif isinstance(entry, list):
            wrapper = ListWrapper(entry, make_responsive, root=root)
            __make_responsive_for_list(root, entry)
            wrapper.add_observer(root)
            parent[position] = wrapper
def __make_responsive_for_dict(root: object, parent: object) -> None:
    """Recursively make the values of a dict (or the attributes of an
    object) responsive, in place.

    Args:
        root (object): the main object that should be responsive
        parent (object): the current dict or object in the hierarchy
    """
    # Plain dicts are walked directly; other objects via their __dict__.
    attributes = parent if isinstance(parent, dict) else parent.__dict__
    for key, entry in attributes.items():
        entry_type = type(entry)
        if entry_type.__module__ != "builtins" or isinstance(entry, dict):
            wrapper = DictWrapper(entry, make_responsive, root=root)
            __make_responsive_for_dict(root, entry)
            wrapper.add_observer(root)
            attributes[key] = wrapper
        elif isinstance(entry, list):
            wrapper = ListWrapper(entry, make_responsive, root=root)
            __make_responsive_for_list(root, entry)
            wrapper.add_observer(root)
            attributes[key] = wrapper
def __is_class(obj):
    """Check whether *obj* is an instance of a user-defined class."""
    obj_type = type(obj)
    return str(obj_type).startswith("<class") and not obj_type.__module__ == "builtins"


def make_responsive(obj: object, root: Subject = None) -> object:
    """Modify object to be responsive.

    Lists, dicts and instances of user-defined classes are replaced by
    observable wrappers; every other value is returned unchanged.

    Args:
        obj (object): the object to modify
        root (Subject): another root receiving all notifications

    Returns:
        Modified object.
    """
    if isinstance(obj, list):
        subject = ListWrapper(obj, make_responsive, root=root)
        __make_responsive_for_list(subject if root is None else root, obj)
        if root is not None:
            subject.add_observer(root)
        return subject
    if isinstance(obj, dict) or __is_class(obj):
        subject = DictWrapper(obj, make_responsive, root=root)
        __make_responsive_for_dict(subject if root is None else root, obj)
        if root is not None:
            subject.add_observer(root)
        return subject
    return obj
from typing import Any
from responsive.constants import Context, Operation
from responsive.observer import Observer
from responsive.subject import Subject
class DictWrapper(Subject, Observer):
    """Wrapper that makes a dict or user-defined object observable.

    Attribute reads are delegated to the wrapped object; attribute
    writes mutate the wrapped object and notify all observers.
    """

    def __init__(self, obj: object, make_responsive: callable, root: Subject = None):
        """Initialize wrapper.

        Args:
            obj (object): object to wrap.
            make_responsive (callable): function to make responsive
            root (Subject): root object receiving notifications
        """
        super().__init__()
        # Order matters: __setattr__ delegates into the wrapped object
        # as soon as 'obj' exists in __dict__, so 'obj' must be the last
        # attribute assigned here.
        self.make_responsive = make_responsive
        self.root = root
        self.obj = obj

    def __repr__(self) -> str:
        """Get string representation of wrapped data.

        Returns:
            string representation of wrapped data.
        """
        return f"{self.obj}"

    def __setattr__(self, name: str, value: Any) -> None:
        """Creating attribute 'obj' or changing one of its attributes.

        Args:
            name (str): name of the attribute.
            value (Any): value of the attribute.
        """
        if "obj" in self.__dict__:
            # Wrapper is fully constructed: route the assignment into
            # the wrapped object and notify observers.
            # NOTE(review): the old value is read first, which raises
            # KeyError/AttributeError when 'name' does not exist yet —
            # new attributes cannot be added through the wrapper.
            # Confirm this is intended.
            if isinstance(self.obj, dict):
                old_value = self.obj[name]
                # Wrap the new value too so nested changes also notify.
                self.obj[name] = self.make_responsive(
                    value, root=self.root if self.root is not None else self
                )
                self.notify(
                    id=id(self),
                    context=Context.DICTIONARY,
                    name=name,
                    old=old_value,
                    new=value,
                    operation=Operation.VALUE_CHANGED,
                )
            else:
                old_value = self.obj.__dict__[name]
                self.obj.__dict__[name] = self.make_responsive(
                    value, root=self.root if self.root is not None else self
                )
                self.notify(
                    id=id(self),
                    context=Context.CLASS,
                    name=name,
                    old=old_value,
                    new=value,
                    operation=Operation.VALUE_CHANGED,
                )
        else:
            # Still constructing the wrapper itself (make_responsive,
            # root, obj): store the attribute on the wrapper as usual.
            super().__setattr__(name, value)

    def __getattr__(self, name: str) -> Any:
        """Get value of attribute.

        Only called for names not found on the wrapper itself, so this
        transparently delegates reads to the wrapped object.

        Args:
            name (str): name of the attribute

        Returns:
            value of the attribute.
        """
        if isinstance(self.obj, dict):
            return self.obj[name]
        return self.obj.__dict__[name]

    def __len__(self):
        """Get length of dictionary."""
        return len(self.obj)

    def update(self, subject: object, *args: Any, **kwargs: Any):
        """Called when related subject has changed.

        Forwards notifications from nested subjects to own observers.

        Args:
            subject (object): the one who does the notification.
            *args (Any): optional positional arguments
            **kwargs (Any): optional key/value arguments
        """
        self.notify(*args, **kwargs)

    def __eq__(self, other: object) -> bool:
        """Comparing with a plain dict or another DictWrapper.

        Args:
            other (object): another object to compare with

        Returns:
            true when equal, otherwise false.
        """
        if isinstance(other, dict):
            return self.obj.__eq__(other)
        if isinstance(other, DictWrapper):
            return self.obj.__eq__(other.obj)
        return False

    def __hash__(self):
        """Calculating hash of underlying object."""
        # NOTE(review): assumes the wrapped object is a dict with
        # sortable keys and hashable values; raises for wrapped class
        # instances (no .items()). Confirm wrappers of class instances
        # are never used as hash keys.
        return hash(tuple(sorted(self.obj.items())))
class ListWrapper(Subject, Observer):
    """Wrapper that makes a list observable.

    Mutating operations (append, remove, index assignment) notify all
    observers; read access is delegated to the wrapped list.
    """

    def __init__(self, obj: object, make_responsive: callable, root: Subject = None):
        """Initialize wrapper.

        Args:
            obj (object): list to wrap.
            make_responsive (callable): function to make responsive
            root (Subject): root object receiving notifications
        """
        super().__init__()
        self.make_responsive = make_responsive
        self.root = root
        self.obj = obj

    def __repr__(self) -> str:
        """Get string representation of wrapped data.

        Returns:
            string representation of wrapped data.
        """
        return f"{self.obj}"

    def append(self, value):
        """Append a value to the list and notify observers."""
        # NOTE(review): unlike __setitem__, the appended value is not
        # passed through make_responsive, so nested changes inside it
        # will not notify — confirm this is intended.
        self.obj.append(value)
        self.notify(id=id(self), context=Context.LIST, new=value, operation=Operation.VALUE_ADDED)

    def remove(self, value):
        """Remove a value from the list and notify observers."""
        self.obj.remove(value)
        self.notify(id=id(self), context=Context.LIST, old=value, operation=Operation.VALUE_REMOVED)

    def __setitem__(self, index, value):
        """Change value at given index and notify observers."""
        old_value = self.obj[index]
        # Wrap the new value too so nested changes also notify.
        self.obj[index] = self.make_responsive(
            value, root=self.root if self.root is not None else self
        )
        self.notify(
            id=id(self),
            context=Context.LIST,
            index=index,
            old=old_value,
            new=value,
            operation=Operation.VALUE_CHANGED,
        )

    def __getitem__(self, index):
        """Get value at given index."""
        return self.obj[index]

    def __len__(self):
        """Get length of list."""
        return len(self.obj)

    def update(self, subject: object, *args: Any, **kwargs: Any):
        """Called when related subject has changed.

        Forwards notifications from nested subjects to own observers.

        Args:
            subject (object): the one who does the notification.
            *args (Any): optional positional arguments
            **kwargs (Any): optional key/value arguments
        """
        self.notify(*args, **kwargs)

    def __eq__(self, other: object) -> bool:
        """Comparing with a plain list or another ListWrapper.

        Args:
            other (object): another object to compare with

        Returns:
            true when equal, otherwise false.
        """
        if isinstance(other, list):
            return self.obj.__eq__(other)
        if isinstance(other, ListWrapper):
            return self.obj.__eq__(other.obj)
        return False

    def __hash__(self):
        """Calculate a hash over the wrapped list's content.

        Bug fix: the previous ``hash(self.obj)`` always raised
        TypeError because lists are unhashable. Hash the content as a
        tuple instead, mirroring DictWrapper which hashes its sorted
        items. Still raises TypeError when an element is unhashable.
        """
        return hash(tuple(self.obj))
from collections.abc import Callable
from typing import Any
class Observer:
    """Observer from the subject/observer pattern."""

    def update(self, subject: object, *args: Any, **kwargs: Any):
        """Called when related subject has changed.

        Args:
            subject (object): the one who does the notification.
            *args (Any): optional positional arguments
            **kwargs (Any): optional key/value arguments
        """
        raise NotImplementedError()

    def get_interests(self) -> dict[str, Callable[[Any], bool]]:  # pylint: disable=no-self-use
        """Tell a subject what this observer is interested in.

        When an empty dict is returned (the default) all changes are of
        interest.  Otherwise each entry maps a name to a predicate for the
        value (idea: ``is_relevant(value)``); when the change does not match
        any interest, the notification (``update``) is not delivered.

        Returns:
            dictionary with names and functions (idea: `is_relevant(value)`)
        """
        return {}
class DefaultObserver(Observer):
    """A simple observer that records every update it receives."""

    def __init__(self):
        """Start with an empty update log and no interests."""
        super().__init__()
        self.__history = []
        self.__filters = {}

    def update(self, subject: object, *args: Any, **kwargs: Any) -> None:
        """Record a notification from *subject* together with its arguments.

        Args:
            subject (object): the one who does the notification.
            *args (Any): optional positional arguments
            **kwargs (Any): optional key/value arguments
        """
        self.__history.append((subject, args, kwargs))

    def set_interests(self, interests: dict[str, Callable[[Any], bool]]) -> None:
        """Replace the current interests.

        Args:
            interests (dict[str, Callable[[Any], bool]]): new interests.
        """
        self.__filters = interests

    def get_interests(self) -> dict[str, Callable[[Any], bool]]:
        """Return the registered interests (see :class:`Observer`).

        Returns:
            dictionary with names and functions (idea: `is_relevant(value)`)
        """
        return self.__filters

    def __iter__(self):
        """Iterate over the recorded ``(subject, args, kwargs)`` triples."""
        return iter(self.__history)

    def clear(self):
        """Forget all recorded updates."""
        self.__history.clear()

    def get_count_updates(self):
        """Return how many updates have been recorded."""
        return len(self.__history)
class DoNothingObserver(Observer):
    """Does nothing (more of a test): all updates are ignored."""

    def update(self, subject: object, *args: Any, **kwargs: Any) -> None:
        """Called when the subject has been changed; intentionally a no-op.

        Args:
            subject (object): the one who does the notification.
            *args (Any): optional positional arguments
            **kwargs (Any): optional key/value arguments
        """
class OutputObserver(Observer):
    """Output a line for each update. Default output function is `print`."""

    def __init__(self, output_function=print):
        """Initialize observer with output function.

        Args:
            output_function: callable invoked with one string per update.
        """
        # Fix: the base initializer was never called before.
        super().__init__()
        self.__output_function = output_function

    def update(self, subject: object, *args: Any, **kwargs: Any) -> None:
        """Called when the subject has been changed.

        Args:
            subject (object): the one who does the notification.
            *args (Any): optional positional arguments
            **kwargs (Any): optional key/value arguments
        """
        self.__output_function(
            f"subject with id {id(subject)} has notified with {args} and {kwargs}"
        )
from bs4 import BeautifulSoup
import uuid
from respysive.utils import _parse_style_class
class Content:
    """A class representing a slide content.

    HTML fragments are accumulated in ``self.content``; :meth:`render` wraps
    them in a single ``<div>`` and returns the pretty-printed markup.
    """

    def __init__(self):
        self.content = ""
        self.scripts = {}
        self.grid_cols = 0

    def clear(self):
        """Discard all accumulated HTML content."""
        self.content = ""

    @staticmethod
    def _with_img_fluid(kwargs: dict) -> dict:
        """Normalize ``kwargs['class']`` to a list and append 'img-fluid'.

        Shared by the media helpers (image, svg, plotly, altair), which all
        need the Bootstrap 'img-fluid' class; mutates and returns *kwargs*.
        """
        if 'class' not in kwargs:
            kwargs['class'] = []
        elif isinstance(kwargs['class'], str):
            kwargs['class'] = [kwargs['class']]
        kwargs['class'].append('img-fluid')
        return kwargs

    def add_script(self, name: str, script: str):
        """
        Add a script to the HTML document
        :param name: name of the script
        :param script: script to add
        """
        self.scripts[name] = script

    def add_heading(self, text: str, tag: str = "h3", icon: str = None, **kwargs):
        """
        Add a heading element to the HTML document.
        :param text: The text of the heading.
        :param tag: The HTML tag to use for the heading. Default is 'h3'.
        :param icon: The icon of the heading (optional).
        :param kwargs: Additional CSS styles and html class to apply to the heading. (optional)
            The keys should be in the format of CSS property names with '_' instead of '-', example: font_size.
            You can also pass the class key with a string or a list of strings,
            example: {'font_size': '20px', 'color': 'blue', 'class': 'my-class'} or
            {'font_size': '20px', 'color': 'blue', 'class': ['my-class', 'my-second-class']}
        """
        # Validate first so an invalid tag never touches self.content.
        if tag not in ["h1", "h2", "h3", "h4", "h5"]:
            raise ValueError("Invalid tag, the tag must be one of h1, h2, h3, h4 or h5")
        s = _parse_style_class(kwargs)
        self.content += (
            f"<{tag} {s}><i class='{icon}'></i> {text}</{tag}>"
            if icon
            else f"<{tag} {s}>{text}</{tag}>"
        )

    def add_text(self, text: str, tag: str = "p", **kwargs):
        """
        Add a text element to the HTML document.
        :param text: The text to be added.
        :param tag: The HTML tag to use for the text. Default is 'p'.
        :param kwargs: Additional CSS styles and html class to apply to the text. (optional)
            See :meth:`add_heading` for the expected format.
        """
        if tag not in ["p", "span"]:
            raise ValueError("Invalid tag, the tag must be one of p or span")
        s = _parse_style_class(kwargs)
        self.content += f"""<{tag} {s}>{text}</{tag}>"""

    def add_list(self, items: list, ordered=False, **kwargs):
        """
        Add a list element to the HTML document.
        :param items: The items of the list.
        :param ordered: Whether the list should be ordered (<ol>) or not (<ul>).
        :param kwargs: Additional CSS styles and html class to apply to the list. (optional)
            See :meth:`add_heading` for the expected format.
        """
        list_tag = "ol" if ordered else "ul"
        s = _parse_style_class(kwargs)
        list_items = "\n".join([f"<li>{item}</li>" for item in items])
        self.content += f"<{list_tag} {s}>\n{list_items}\n</{list_tag}>"

    def add_image(self, src: str, alt: str = "", **kwargs):
        """
        Add an image element to the HTML document.
        :param src: The source of the image (used as ``data-src`` for lazy loading).
        :param alt: The alternative text for the image.
        :param kwargs: Additional CSS styles and html class to apply to the image. (optional)
            See :meth:`add_heading` for the expected format.
        """
        s = _parse_style_class(self._with_img_fluid(kwargs))
        self.content += f"<img data-src='{src}' alt='{alt}' {s}>"

    def add_svg(self, svg: str, **kwargs):
        """
        Add a svg to the document.
        :param svg: The code of the svg.
        :param kwargs: Additional CSS styles and html class to apply to the container. (optional)
            See :meth:`add_heading` for the expected format.
        """
        s = _parse_style_class(self._with_img_fluid(kwargs))
        self.content += f"""<div {s}>{svg}</div>"""

    def add_plotly(self, json: str, **kwargs):
        """
        Add a plotly json to the document.
        :param json: a plotly json (``fig.to_json()``).  NOTE: the parameter
            name shadows the stdlib module name; kept for API compatibility.
        :param kwargs: Additional CSS styles and html class to apply to the container. (optional)
            See :meth:`add_heading` for the expected format.
        """
        s = _parse_style_class(self._with_img_fluid(kwargs))
        # Replace single quotes so the JSON can sit inside the single-quoted
        # JS string literal below without ending it early (avoids an empty chart).
        j = json.replace("'", "\u2019")
        chart_id = "chart-" + str(uuid.uuid4())
        self.content += f"""<div {s} id='{chart_id}'></div>
<script>var Plotjson = '{j}';
var figure = JSON.parse(Plotjson);
Plotly.newPlot('{chart_id}', figure.data, figure.layout);</script>"""

    def add_altair(self, json: str, **kwargs):
        """
        Add an Altair json to the document.
        :param json: an Altair json (``chart.to_json()``).
        :param kwargs: Additional CSS styles and html class to apply to the container. (optional)
            See :meth:`add_heading` for the expected format.
        """
        s = _parse_style_class(self._with_img_fluid(kwargs))
        chart_id = "chart-" + str(uuid.uuid4())
        self.content += f"""<div {s} id='{chart_id}'></div>
<script>var opt = {{renderer: "svg"}};
vegaEmbed("#{chart_id}", {json} , opt);</script>"""

    def add_div(self, div: str, **kwargs):
        """
        Add a simple div.
        :param div: whatever you want that can fit in a div.
        :param kwargs: Additional CSS styles and html class to apply to the div. (optional)
            See :meth:`add_heading` for the expected format.
        """
        s = _parse_style_class(kwargs)
        self.content += f"""<div {s}>{div}</div>"""

    def render(self):
        """
        Return the complete HTML document as a string.
        """
        html = f"""<div>{self.content}</div>"""
        soup = BeautifulSoup(html, "html.parser")
        ident_content = soup.prettify()
        return ident_content
from respysive.utils import _parse_style_class
from respysive import Content
import os
import re
import json
def _check_content_type(col: str):
    """
    Detect what kind of content *col* is and wrap it in rendered HTML.

    Branch order matters: local image file, image URL, Altair JSON,
    Plotly JSON, then plain text.  A local file with a non-image extension
    or a URL without an image extension is returned unchanged (not wrapped).

    :param col: the content to inspect
    :type col: str
    :return: the content wrapped in HTML via :class:`Content` (or unchanged)
    """

    def _check_altair(_col):
        """
        Check if the input is an Altair chart (vega-lite JSON).
        :param _col: the chart candidate (JSON string or dict)
        """
        if isinstance(_col, str):
            return "https://vega.github.io/schema/vega-lite" in _col
        elif isinstance(_col, dict):
            _col = json.dumps(_col)
            return "https://vega.github.io/schema/vega-lite" in _col
        # Any other type falls through and returns None (falsy).

    def _check_plotly(_col):
        """
        Check if the input is a Plotly chart (figure JSON).
        :param _col: the chart candidate (JSON string or dict)
        """
        if isinstance(_col, str):
            return """{"data":[{""" in _col
        elif isinstance(_col, dict):
            _col = json.dumps(_col)
            return """{"data":[{""" in _col

    # Classes that center the rendered element horizontally in its column.
    center = {'class': ['d-flex', 'justify-content-center', 'mx-auto']}
    # NOTE(review): os.path.isfile assumes `col` is a string — callers guard
    # with isinstance(col, str) before calling; confirm that stays true.
    if os.path.isfile(col):
        if os.path.splitext(col)[1].lower() in ['.jpg', '.jpeg', '.png', '.gif', '.tif', '.apng', '.bmp', '.svg']:
            c = Content()
            c.add_image(col, **center)
            col = c.render()
    elif re.match(
            r'(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)',
            col):
        # URL case: embedded as an image only when an image extension occurs
        # anywhere in the address; otherwise `col` stays unchanged.
        if re.search(r'\.(jpg|jpeg|png|gif|tif|apng|bmp|svg)', col):
            c = Content()
            c.add_image(col, **center)
            col = c.render()
    elif _check_altair(col):
        c = Content()
        c.add_altair(col, **center)
        col = c.render()
    elif _check_plotly(col):
        c = Content()
        c.add_plotly(col, **center)
        col = c.render()
    else:
        # Anything else is treated as plain text.
        c = Content()
        c.add_text(col)
        col = c.render()
    return col
def _add_list_classes(text: str):
    """
    Inject Bootstrap list-group classes into plain <ul> and <li> tags.

    :param text: str, the text where the classes should be added
    :return: str, the text with the classes added
    """
    replacements = {
        '<ul>': '<ul class="list-group list-group-flush">',
        '<li>': '<li class="list-group-item" style="background-color: transparent;" >',
    }
    for plain_tag, styled_tag in replacements.items():
        text = text.replace(plain_tag, styled_tag)
    return text
def _append_class(_style, _class):
    """
    Append a CSS class to a style dictionary, in place.

    :param _style: dict, the style dictionary (mutated)
    :param _class: str, the class to append
    :return: dict, the same dictionary with the class appended
    """
    if 'class' not in _style:
        _style['class'] = [_class]
    elif isinstance(_style['class'], str):
        # Promote a bare string to a list, then add the new class.
        _style['class'] = [_style['class'], _class]
    else:
        _style['class'].append(_class)
    return _style
def _append_style(_style, _style_to_append):
    """
    Merge the entries of *_style_to_append* into *_style*, in place.

    :param _style: dict, the style dictionary (mutated)
    :param _style_to_append: dict, the style entries to merge in
    :return: dict, the same dictionary with the entries merged
    """
    for name, value in _style_to_append.items():
        _style[name] = value
    return _style
def _check_styles(styles, *args):
    """
    Validate (and default) the per-element style list.

    Bug fix: the normalized ``styles`` list was previously built but never
    returned, contradicting the docstring.  It is now returned; existing
    callers that ignore the return value are unaffected.

    :param styles: list of style dicts, one per element, or None for defaults
    :param args: one or more lists of elements that must match styles in length
    :return: list of style dicts with the same length as the elements
    :raises ValueError: when an element list and the styles differ in length
    """
    if styles is None:
        styles = [{} for _ in range(len(args[0]))]
    for arg in args:
        if len(arg) != len(styles):
            raise ValueError(f"{arg} and styles must have the same length")
    return styles
class Slide:
    """
    A class representing a slide in a presentation.
    """

    def __init__(self, center=False, **kwargs):
        """Create an empty slide.

        :param center: whether the slide content should be vertically centered
        :param kwargs: extra attributes kept for use by the presentation renderer
        """
        self.content = ""
        self.center = center
        self.kwargs = kwargs

    def add_title(self, text: str, tag: str = "h3", icon: str = None, **kwargs):
        """
        Add a heading element to the slide, wrapped in a full-width row.
        :param text: The text of the heading.
        :param tag: The HTML tag to use for the heading. Default is 'h3'.
        :param icon: The icon of the heading (optional).
        :param kwargs: Additional CSS styles and html class to apply to the heading. (optional)
            example: {'font_size': '20px', 'color': 'blue', 'class': 'my-class'} or
            {'font_size': '20px', 'color': 'blue', 'class': ['my-class', 'my-second-class']}
        """
        c = Content()
        c.add_heading(text, tag, icon, **kwargs)
        row = "<div class='row'><div class='col-12 mx-auto'>"
        self.content += row + c.render() + "</div></div>"

    def add_content(self, content: list, columns=None, styles: list = None):
        """
        Add content to the slide.
        :param content: list of column contents (file path, URL, chart JSON or raw text)
        :param columns: list of int bootstrap widths, one per column (default [12])
        :param styles: list of additional css style dicts, one per column (optional)
        """
        if columns is None:
            columns = [12]
        _check_styles(styles, content, columns)
        row = "<div class='row'>"
        for i in range(len(content)):
            col = content[i]
            if isinstance(col, str):
                # Strings may be files, URLs, chart JSON or plain text.
                col = _check_content_type(col)
            if styles and len(styles) > i:
                col = f"<div class='col-md-{columns[i]}' {_parse_style_class(styles[i])}>{col}</div>"
            else:
                col = f"<div class='col-md-{columns[i]}'>{col}</div>"
            row += col
        self.content += row + "</div>"

    def add_card(self, cards: list, styles: list = None):
        """
        Add one Bootstrap card per entry to the slide.
        :param cards: list of dicts with any of the keys 'title', 'text', 'image'
        :param styles: list of css style dicts, one per card (optional)
        """
        _check_styles(styles, cards)
        if styles is None:
            # Bug fix: the previous `[{...}] * len(cards)` repeated ONE shared
            # dict, so 'card h-100' was appended to the same class list once
            # per card; building a fresh dict per card avoids the aliasing.
            styles = [{'class': 'bg-info'} for _ in cards]
        cards_html = ""
        for card, style in zip(cards, styles):
            if 'class' not in style:
                style['class'] = []
            elif isinstance(style['class'], str):
                style['class'] = [style['class']]
            style['class'].append('card h-100')
            s = _parse_style_class(style)
            card_html = ""
            for key in card.keys():
                if key == 'image':
                    card_html += f'<img src="{card[key]}" class="card-img-top mx-auto" alt="">'
                elif key == 'title':
                    card_html += f'<h4 class="card-title">{card[key]}</h4>'
                elif key == 'text':
                    card[key] = _add_list_classes(card[key])
                    card_html += f'<p class="card-text" style="font-size:60%">{card[key]}</p>'
            cards_html += f"""
<div class="col">
<div {s}>
{card_html}
</div>
</div>"""
        self.content += f"<div class='row'>{cards_html}</div>"

    def add_title_page(self, title_page_content: dict, styles: list = None):
        """
        Add a title page to the slide.
        :param title_page_content: dict with any of the keys 'title', 'subtitle', 'authors', 'logo'
        :param styles: list of css style dicts for title, subtitle, authors and
            logo, in that order (optional; missing entries fall back to no style)
        """
        title = title_page_content.get('title', '')
        subtitle = title_page_content.get('subtitle', '')
        authors = title_page_content.get('authors', '')
        logo = title_page_content.get('logo', '')
        _check_styles(styles, title_page_content)
        if styles is None:
            styles = []

        def style_at(i: int) -> str:
            # Guard against short style lists (previously raised IndexError).
            return _parse_style_class(styles[i]) if len(styles) > i else ""

        title_s = style_at(0)
        subtitle_s = style_at(1)
        authors_s = style_at(2)
        logo_s = style_at(3)
        # Bug fix: stray '"' characters after each style placeholder produced
        # malformed HTML, and the authors <h4> was closed with </h3>.
        title_html = f'<div class="row"><div class="col-12"><h2 {title_s}>{title}</h2></div></div>' if title else ''
        subtitle_html = f'<div class="row"><div class="col-12"><h3 {subtitle_s}>{subtitle}</h3></div></div>' if subtitle else ''
        authors_html = f'<div class="col-9"><h4 {authors_s}>{authors}</h4></div>' if authors else ''
        logo_html = f'<div class="col-3 "><img src="{logo}" {logo_s}></div>' if logo else ''
        authors_logo_html = f'<div class="row align-items-center">{authors_html}{logo_html}</div>'
        title_page_html = f'<div class="title-page">{title_html}{subtitle_html}{authors_logo_html}</div>'
        self.content += title_page_html
# RESSEG
Automatic segmentation of postoperative brain resection cavities from magnetic resonance images (MRI) using a convolutional neural network (CNN) trained with [PyTorch](https://pytorch.org/) 1.7.1.
## Installation
It's recommended to use [`conda`](https://docs.conda.io/en/latest/miniconda.html).
A 6-GB GPU is large enough to segment an image in an MNI space of size 193 × 229 × 193.
```shell
conda create -n resseg python=3.8 -y
conda activate resseg
pip install light-the-torch
ltt install torch
pip install resseg
resseg --help
```
## Usage
Below are two examples of cavity segmentation for tumor and epilepsy surgery. The epilepsy example includes registration to the [MNI space](https://www.lead-dbs.org/about-the-mni-spaces/).
Both examples can be run online using Google Colab:
[](https://colab.research.google.com/github/fepegar/resseg/blob/master/RESSEG.ipynb)
### BITE
Example using an image from the
[Brain Images of Tumors for Evaluation database (BITE)](http://nist.mni.mcgill.ca/?page_id=672).
```shell
BITE=`resseg-download bite`
resseg $BITE -o bite_seg.nii.gz
```

### EPISURG
Example using an image from the [EPISURG dataset](https://doi.org/10.5522/04/9996158.v1).
Segmentation works best when images are in the MNI space, so `resseg` includes a tool
for this purpose (requires [`antspyx`](https://antspyx.readthedocs.io/en/latest/?badge=latest)).
```shell
pip install antspyx
EPISURG=`resseg-download episurg`
resseg-mni $EPISURG -t episurg_to_mni.tfm
resseg $EPISURG -o episurg_seg.nii.gz -t episurg_to_mni.tfm
```

## Trained model
The trained model can be used without installing `resseg`, but you'll need to install `unet` first:
```shell
pip install unet==0.7.7
```
Then, in Python:
```python
import torch
repo = 'fepegar/resseg'
model_name = 'ressegnet'
model = torch.hub.load(repo, model_name, pretrained=True)
```
## Graphical user interface using 3D Slicer
There is an experimental graphical user interface (GUI) built on top of [3D Slicer](https://www.slicer.org/).
Visit [this repository](https://github.com/fepegar/SlicerParcellation#brain-resection-cavity-segmentation) for additional information and installation instructions.

## Plotting resected structures
A quantitative analysis of the resected structures can be performed using a brain parcellation computed using [GIF](http://niftyweb.cs.ucl.ac.uk/program.php?p=GIF) (3.0) or [FreeSurfer](https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/AnatomicalROI).
```python
from resseg.parcellation import GIFParcellation, FreeSurferParcellation
parcellation_path = 't1_seg_gif.nii.gz'
cavity_seg_on_preop_path = 'cavity_on_preop.nii.gz'
parcellation = GIFParcellation(parcellation_path)
```
I used a sphere near the hippocampus to simulate the resection cavity segmentation, and the GIF parcellation in the [FPG dataset](https://torchio.readthedocs.io/datasets.html#fpg) of [TorchIO](https://torchio.readthedocs.io/).
```python
parcellation.print_percentage_of_resected_structures(cavity_seg_on_preop_path)
```
```
Percentage of each resected structure:
100% of Left vessel
83% of Left Inf Lat Vent
59% of Left Amygdala
58% of Left Hippocampus
26% of Left PIns posterior insula
24% of Left PP planum polare
21% of Left Basal Forebrain
18% of Left Claustrum
16% of Left PHG parahippocampal gyrus
15% of Left Pallidum
15% of Left Ent entorhinal area
13% of Left FuG fusiform gyrus
13% of Left Temporal White Matter
11% of Left Putamen
10% of Left Insula White Matter
5% of Left ITG inferior temporal gyrus
5% of Left periventricular white matter
5% of Left Ventral DC
The resection volume is composed of:
30% is Left Temporal White Matter
12% is Left Hippocampus
10% is Left Insula White Matter
7% is Left FuG fusiform gyrus
6% is Left Amygdala
4% is Left ITG inferior temporal gyrus
4% is Left PP planum polare
3% is Left Putamen
3% is Left Claustrum
3% is Left PIns posterior insula
3% is Left PHG parahippocampal gyrus
2% is [Unkown label: 4]
1% is Left Ent entorhinal area
1% is Left Pallidum
1% is Left Inf Lat Vent
1% is Left Ventral DC
```
```python
parcellation.plot_bars(cavity_seg_on_preop_path)
```

```python
parcellation.plot_pie(cavity_seg_on_preop_path)
```

## Credit
If you use this library for your research, please cite the following publications:
[F. Pérez-García et al., 2020, *Simulation of Brain Resection for Cavity Segmentation Using Self-Supervised and Semi-Supervised Learning*](https://link.springer.com/chapter/10.1007%2F978-3-030-59716-0_12).
[F. Pérez-García et al., 2021, *A self-supervised learning strategy for postoperative brain cavity segmentation simulating resections*](https://link.springer.com/article/10.1007/s11548-021-02420-2).
If you use the [EPISURG dataset](https://doi.org/10.5522/04/9996158.v1), which was used to train the model, please cite the following publication:
[F. Pérez-García et al., 2020, *EPISURG: a dataset of postoperative magnetic resonance images (MRI) for quantitative analysis of resection neurosurgery for refractory epilepsy*. University College London. Dataset.](https://doi.org/10.5522/04/9996158.v1)
## See also
- [`resector`](https://github.com/fepegar/resector) was used to simulate brain resections during training
- [TorchIO](http://torchio.rtfd.io/) was also used extensively. Both `resseg` and `resector` require this library.
| /resseg-0.3.7.tar.gz/resseg-0.3.7/README.md | 0.520253 | 0.985229 | README.md | pypi |
# Ressenter
Ressenter is a command line tool to pull content from Dissenter.com, a browser-based social network operated by Gab.com. (We will not reward either of these domains with hyperlinks.)
This tool does not require any authentication with Dissenter; all the data it pulls is available publicly.
Currently, this tool can:
* Reliably pull all comments made on Dissenter within the last seven days
* Pull the current 'top' comments
* Pull the current 'controversial' comments
* Pull the current trending URLs
* Pull all the comments for a particular URL
* Pull all the comments made by a particular user
## Robustness
This tool was made by reverse engineering Dissenter's API. (To be fair, it wasn't that hard.) Because we have no insight into Dissenter's internals, there's no guarantee that this tool provides an exhaustive or reliable archive of Dissenter content.
For example, we don't know whether comments become inaccessible after some period of time, or whether there is a limit on how many comments we can pull from any particular user.
## Usage
```
Usage: ressenter [OPTIONS] COMMAND [ARGS]...
Options:
--format [jsonl|csv] output format
--help Show this message and exit.
Commands:
comments Pull all the most recent comments
trending Pull the current trending URLs
url Pull comments for a particular URL.
user Pull all the comments of a particular user
```
Ressenter can output data to `jsonl` and `csv` (the default is `jsonl`). Just pass the `--format` option before the subcommand (e.g., `ressenter --format=csv comments`). All data is currently written to `stdout`; to save output to a file, use pipes (e.g., `ressenter comments > comments.jsonl`).
### `comments`
```
Usage: ressenter comments [OPTIONS]
Pull all the most recent comments
Options:
--sort [latest|controversial|top]
comment sort order
--after-id TEXT pull no earlier than this comment ID
--after-time TEXT pull no comments posted earlier than this
time
--max INTEGER maximum number of comments to pull
--help Show this message and exit.
```
### `trending`
```
Usage: ressenter trending [OPTIONS]
Pull the current trending URLs
Options:
--help Show this message and exit.
```
### `url`
```
Usage: ressenter url [OPTIONS] URL
Pull comments for a particular URL. Note that several comment metadata
items (such as upvotes, downvotes, and comments) are not available when
pulling comments from a URL.
Options:
--sort [latest|controversial|top]
comment sort order
--after-id TEXT pull no earlier than this comment ID
--after-time TEXT pull no comments posted earlier than this
time
--max INTEGER maximum number of comments to pull
--help Show this message and exit.
```
### `user`
```
Usage: ressenter user [OPTIONS] USER
Pull all the comments of a particular user, identified by their UID
Options:
--sort [latest|controversial|top]
comment sort order
--after-id TEXT pull no earlier than this comment ID
--after-time TEXT pull no comments posted earlier than this
time
--max INTEGER maximum number of comments to pull
--help Show this message and exit.
```
## Playbook
Here are some common use cases:
#### Pull all the most recent comments
```bash
ressenter comments
```
#### Pull all the recent top comments
```bash
ressenter comments --sort=top
```
#### Pull all the recent controversial comments
```bash
ressenter comments --sort=controversial
```
#### Pull all comments made in the past hour
```bash
ressenter comments --after-time "one hour ago"
```
#### Pull all the current trending URLs
```bash
ressenter trending
```
#### Pull all of the comments for a particular URL
```bash
ressenter url https://www.facebook.com
```
## Module Usage
To use Ressenter as a Python module, just import it and setup a listener—a function that will be called on every result. You may also want to disable the standard output. For example:
```python
import ressenter
results = []
ressenter.disable_standard_output()
ressenter.result_listeners.append(results.append)
ressenter.result_listeners.append(lambda k: print(f"Output: {k}"))
ressenter.comments()
print(f"Found {len(results)} results!")
```
All the commands are imported at the top-level namespace (e.g., `ressenter.comments`, `ressenter.trending`, `ressenter.url`, `ressenter.user`) and support the same arguments as their command-line equivalents. Consult the source code and the command-level docs for more information about the specific parameters supported.
## Development
To run Ressenter locally, perform the following steps:
1. Install dependencies with `pipenv install`
2. Activate the virtual environment with `pipenv shell`
3. Run the tool using `main.py` -- for example, `./main.py comments`
## Packaging and Publishing
1. Make sure you have access to PyPI credentials with permission for the `ressenter` repository.
2. Clear the `dist/` folder (`rm dist/*`).
3. Package everything with `python setup.py sdist bdist_wheel`.
4. Check the packaging with `twine check dist/*`.
5. Upload with `twine upload dist/*`.
## Troubleshooting
If you work at the Stanford Internet Observatory, ping Miles McCain on Slack or via email to get help with Ressenter. To report bugs or submit feature requests, please open an issue.
## Desired Features
There are a few features that this tool currently lacks, but that we'd like to add. We haven't yet found reliable ways to extract this data. (If you have, please let us know!)
* Find the most recent URLs commented on
* Iterate through all the URLs with comments
* Iterate through all comments, instead of just those made in the past seven days | /ressenter-0.0.10.tar.gz/ressenter-0.0.10/README.md | 0.582729 | 0.846514 | README.md | pypi |
import os
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional
from ResSimpy.Aquifer import Aquifer
from ResSimpy.Equilibration import Equilibration
from ResSimpy.File import File
from ResSimpy.Gaslift import Gaslift
from ResSimpy.Grid import Grid
from ResSimpy.Hydraulics import Hydraulics
from ResSimpy.Network import Network
from ResSimpy.PVT import PVT
from ResSimpy.RelPerm import RelPerm
from ResSimpy.Rock import Rock
from ResSimpy.Separator import Separator
from ResSimpy.Valve import Valve
from ResSimpy.Water import Water
from ResSimpy.Wells import Wells
@dataclass(kw_only=True, init=False)
class Simulator(ABC):
    """Abstract base class for a reservoir simulation model.

    Aggregates the model's start date, the path it was loaded from and the
    individual sub-models (wells, PVT, equilibration, network, grid, ...),
    each exposed through a read-only property.  Concrete simulators populate
    the private attributes and implement the abstract methods.
    """

    _start_date: str
    _origin: str
    _wells: Wells
    _pvt: PVT
    _separator: Separator
    _water: Water
    _equil: Equilibration
    _rock: Rock
    _relperm: RelPerm
    _valve: Valve
    _aquifer: Aquifer
    _hydraulics: Hydraulics
    _gaslift: Gaslift
    _network: Network
    _grid: Optional[Grid]
    _model_files: File

    """Class Properties"""

    @property
    def start_date(self) -> str:
        """The model's start date string."""
        return self._start_date

    @start_date.setter
    def start_date(self, value: str) -> None:
        self._start_date = value

    @property
    def wells(self) -> Wells:
        """The wells sub-model."""
        return self._wells

    @property
    def pvt(self) -> PVT:
        """The PVT (pressure-volume-temperature) sub-model."""
        return self._pvt

    @property
    def separator(self) -> Separator:
        """The separator sub-model."""
        return self._separator

    @property
    def water(self) -> Water:
        """The water properties sub-model."""
        return self._water

    @property
    def equil(self) -> Equilibration:
        """The equilibration sub-model."""
        return self._equil

    @property
    def rock(self) -> Rock:
        """The rock properties sub-model."""
        return self._rock

    @property
    def relperm(self) -> RelPerm:
        """The relative permeability sub-model."""
        return self._relperm

    @property
    def valve(self) -> Valve:
        """The valve sub-model."""
        return self._valve

    @property
    def aquifer(self) -> Aquifer:
        """The aquifer sub-model."""
        return self._aquifer

    @property
    def hydraulics(self) -> Hydraulics:
        """The hydraulics sub-model."""
        return self._hydraulics

    @property
    def gaslift(self) -> Gaslift:
        """The gas lift sub-model."""
        return self._gaslift

    @property
    def network(self) -> Network:
        """The surface network sub-model."""
        return self._network

    @property
    def grid(self) -> Optional[Grid]:
        """Pass the grid information to the front end."""
        return self._grid

    @property
    def origin(self) -> str:
        """Path of the file the model was loaded from."""
        return self._origin

    @origin.setter
    def origin(self, value: Optional[str]) -> None:
        # A model cannot exist without a source path; reject None explicitly.
        if value is None:
            raise ValueError(f'Origin path to model is required. Instead got {value}.')
        self._origin: str = value.strip()

    @property
    def model_location(self) -> str:
        """Returns the location of the model."""
        return os.path.dirname(self._origin)

    @property
    def model_files(self) -> File:
        """The file structure the model was read from."""
        return self._model_files

    """ Class Methods """

    @staticmethod
    @abstractmethod
    def get_fluid_type(surface_file_name: str) -> str:
        """Returns the fluid type declared in the given surface file."""
        raise NotImplementedError("This method has not been implemented for this simulator yet")

    @abstractmethod
    def set_output_path(self, path: str) -> None:
        """Sets the path where outputs should be written."""
        raise NotImplementedError("Implement this method on the derived class")

    @abstractmethod
    def get_date_format(self) -> str:
        """Returns date format as a string."""
        raise NotImplementedError("Implement this method on the derived class")
from abc import ABC, abstractmethod
from typing import Any, Sequence, Optional
from uuid import UUID
import pandas as pd
from ResSimpy.Enums.UnitsEnum import UnitSystem
from ResSimpy.File import File
class NetworkOperationsMixIn(ABC):
    """Abstract interface for a collection of surface-network objects.

    Concrete subclasses hold one kind of network element and implement
    loading from file plus in-memory add/remove/modify operations.
    """

    @abstractmethod
    def get_all(self) -> Sequence[Any]:
        """Returns every stored network object."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def get_by_name(self, name: str) -> Optional[Any]:
        """Returns the network object with the given name, or None."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def get_df(self) -> pd.DataFrame:
        """Returns the stored objects as a pandas DataFrame."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def get_overview(self) -> str:
        """Returns a human-readable overview of the stored objects."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def load(self, file: File, start_date: str, default_units: UnitSystem) -> None:
        """Loads the network objects from the given file."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def _add_to_memory(self, additional_objs: Optional[list[Any]]) -> None:
        """Appends the given objects to the in-memory collection."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def remove(self, obj_to_remove: UUID | dict[str, None | str | float | int]) -> None:
        """Removes the object identified by id or by matching properties."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def add(self, obj_to_add: dict[str, None | str | float | int]) -> None:
        """Adds an object built from the given properties."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def modify(self, obj_to_modify: dict[str, None | str | float | int],
               new_properties: dict[str, None | str | float | int]) -> None:
        """Modifies the matching object with the new properties."""
        raise NotImplementedError("Implement this in the derived class")

    @property
    @abstractmethod
    def table_header(self) -> str:
        """Header line written before the object table."""
        raise NotImplementedError("Implement this in the derived class")

    @property
    @abstractmethod
    def table_footer(self) -> str:
        """Footer line written after the object table."""
        raise NotImplementedError("Implement this in the derived class")

    @property
    @abstractmethod
    def _network_element_name(self) -> str:
        """Name of the network element kind this collection holds."""
        raise NotImplementedError("Implement this in the derived class")
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Optional
from uuid import UUID
@dataclass
class FileBase(ABC):
    """The abstract base class for simulator files.

    Attributes:
        location (str): Full path to file location
        file_content_as_list (list[str]): List of lines in the file
    """
    location: Optional[str] = None
    file_content_as_list: Optional[list[str]] = field(default=None, repr=False)

    @abstractmethod
    def write_to_file(self) -> None:
        """Write the file contents back out."""
        raise NotImplementedError("Implement this in the derived class")

    @property
    @abstractmethod
    def get_flat_list_str_file(self) -> list[str]:
        """Return the file contents as a single flat list of line strings."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def add_object_locations(self, obj_uuid: UUID, line_indices: list[int]) -> None:
        """Record which line indices in the file belong to the object with obj_uuid."""
        raise NotImplementedError("Implement this in the derived class")

    # NOTE(review): 'comments' was previously unannotated; Optional[str] matches the
    # sibling add_to_file_as_list signature — confirm against concrete implementations.
    @abstractmethod
    def insert_comments(self, additional_content: list[str], comments: Optional[str]) -> list[str]:
        """Insert the given comments into additional_content and return the result."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def get_object_locations_for_id(self, obj_id: UUID) -> list[int]:
        """Return the line indices in the file associated with the given object id."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def remove_object_from_file_as_list(self, objects_to_remove: list[UUID]) -> None:
        """Remove the content associated with the given object ids from the file lines."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def add_to_file_as_list(self, additional_content: list[str], index: int,
                            additional_objects: Optional[dict[UUID, list[int]]] = None,
                            comments: Optional[str] = None) -> None:
        """Insert additional_content at the given index, optionally tracking new objects/comments."""
        raise NotImplementedError("Implement this in the derived class")

    @abstractmethod
    def remove_from_file_as_list(self, index: int, objects_to_remove: Optional[list[UUID]] = None,
                                 string_to_remove: Optional[str] = None) -> None:
        """Remove content at the given index, optionally untracking objects or a specific string."""
        raise NotImplementedError("Implement this in the derived class") | /ressimpy-1.0.3.tar.gz/ressimpy-1.0.3/ResSimpy/FileBase.py | 0.877588 | 0.165155 | FileBase.py | pypi |
import uuid
from abc import ABC
from dataclasses import dataclass
from typing import Optional
from ResSimpy.ISODateTime import ISODateTime
from ResSimpy.Nexus.NexusEnums import DateFormatEnum
@dataclass(kw_only=True)
class Completion(ABC):
    """A class representing well completions.

    IMPORTANT: if modifying this class, make sure to update the relevant tests in test_load_wells, as well as updating
    the constructor calls in the derived classes.

    Args:
    ----
        date (str): The starting date of the completion. Applies until changed.
        i (Optional[int]): The structured grid cell location in the x direction. 'IW' in Nexus
        j (Optional[int]): The structured grid cell location in the y direction. 'JW' in Nexus
        k (Optional[int]): The structured grid cell location in the z direction. 'L' in Nexus
        skin (Optional[float]): The skin value for the completion. 'SKIN' in Nexus
        depth (Optional[float]): The depth of the completion. 'DEPTH' in Nexus
        well_radius (Optional[float]): The well radius. 'RADW' in Nexus
        x (Optional[float]): The x location of the well in distance units/coordinates. 'X' in Nexus
        y (Optional[float]): The y location of the well in distance units/coordinates. 'Y' in Nexus
        angle_a (Optional[float]): the angle relative to the local I axis. 'ANGLA' in Nexus.
        angle_v (Optional[float]): the angle relative to the true vertical axis (global Z axis). 'ANGLV' in Nexus
        grid (Optional[str]): the grid name to which the completion data applies. 'GRID' in Nexus
        depth_to_top (Optional[float]): subsea depth to the top of a completion interval. 'DTOP' in Nexus
        depth_to_bottom (Optional[float]): subsea depth to the bottom of the completion interval. 'DBOT' in Nexus
        perm_thickness_ovr (Optional[float]): permeability thickness override value to use for the completion interval.\
            'KH' in Nexus.
        dfactor (Optional[float]): non-darcy factor to use for rate dependent skin calculations. 'D' in Nexus
        rel_perm_method (Optional[int]): rel perm method to use for the completion. 'IRELPM' in Nexus
        status (Optional[str]): the status of the layer, can be 'ON' or 'OFF'
        date_format (Optional[DateFormat]): format of the supplied date string; used to derive iso_date.
        start_date (Optional[str]): model start date, used when the completion date is a numeric day offset.
    """
    # Name-mangled dataclass fields. The explicit __init__ below supersedes the
    # dataclass-generated one, but these declarations still drive __repr__/__eq__.
    __date: str
    __i: Optional[int] = None
    __j: Optional[int] = None
    __k: Optional[int] = None
    __skin: Optional[float] = None
    __depth: Optional[float] = None
    __well_radius: Optional[float] = None
    __x: Optional[float] = None
    __y: Optional[float] = None
    __angle_a: Optional[float] = None
    __angle_v: Optional[float] = None
    __grid: Optional[str] = None
    __depth_to_top: Optional[float] = None
    __depth_to_bottom: Optional[float] = None
    __perm_thickness_ovr: Optional[float] = None
    __dfactor: Optional[float] = None
    __rel_perm_method: Optional[int] = None
    __status: Optional[str] = None
    __iso_date: Optional[ISODateTime] = None
    __date_format: Optional[DateFormatEnum.DateFormat] = None
    __start_date: Optional[str] = None

    def __init__(self, date: str, i: Optional[int] = None, j: Optional[int] = None, k: Optional[int] = None,
                 skin: Optional[float] = None, depth: Optional[float] = None, well_radius: Optional[float] = None,
                 x: Optional[float] = None, y: Optional[float] = None, angle_a: Optional[float] = None,
                 angle_v: Optional[float] = None, grid: Optional[str] = None, depth_to_top: Optional[float] = None,
                 depth_to_bottom: Optional[float] = None, perm_thickness_ovr: Optional[float] = None,
                 dfactor: Optional[float] = None, rel_perm_method: Optional[int] = None,
                 status: Optional[str] = None, date_format: Optional[DateFormatEnum.DateFormat] = None,
                 start_date: Optional[str] = None) -> None:
        """Store the completion attributes, assign a fresh unique id and derive the ISO date."""
        self.__well_radius = well_radius
        self.__date = date
        self.__i = i
        self.__j = j
        self.__k = k
        self.__skin = skin
        self.__depth = depth
        self.__x = x
        self.__y = y
        self.__angle_a = angle_a
        self.__angle_v = angle_v
        self.__grid = grid
        self.__depth_to_top = depth_to_top
        self.__depth_to_bottom = depth_to_bottom
        self.__perm_thickness_ovr = perm_thickness_ovr
        self.__dfactor = dfactor
        self.__rel_perm_method = rel_perm_method
        self.__status = status
        # Every completion instance gets its own random unique identifier.
        self.__id: uuid.UUID = uuid.uuid4()
        self.__date_format = date_format
        self.__start_date = start_date
        # Derived last so date, date_format and start_date are already set.
        self.__iso_date = self.set_iso_date()

    # Read-only accessors for the name-mangled fields above.
    @property
    def well_radius(self):
        return self.__well_radius

    @property
    def date(self):
        return self.__date

    @property
    def iso_date(self):
        return self.__iso_date

    @property
    def i(self):
        return self.__i

    @property
    def j(self):
        return self.__j

    @property
    def k(self):
        return self.__k

    @property
    def skin(self):
        return self.__skin

    @property
    def depth(self):
        return self.__depth

    @property
    def x(self):
        return self.__x

    @property
    def y(self):
        return self.__y

    @property
    def angle_a(self):
        return self.__angle_a

    @property
    def angle_v(self):
        return self.__angle_v

    @property
    def grid(self):
        return self.__grid

    @property
    def depth_to_top(self):
        return self.__depth_to_top

    @property
    def depth_to_bottom(self):
        return self.__depth_to_bottom

    @property
    def perm_thickness_ovr(self):
        return self.__perm_thickness_ovr

    @property
    def dfactor(self):
        return self.__dfactor

    @property
    def rel_perm_method(self):
        return self.__rel_perm_method

    @property
    def status(self):
        return self.__status

    @property
    def id(self):
        return self.__id

    @property
    def date_format(self):
        return self.__date_format

    @property
    def start_date(self):
        return self.__start_date

    def to_dict(self) -> dict[str, None | float | int | str]:
        """Return the core completion attributes as a dictionary.

        Note: id, iso_date, date_format and start_date are intentionally not included.
        """
        attribute_dict = {
            'well_radius': self.__well_radius,
            'date': self.__date,
            'i': self.__i,
            'j': self.__j,
            'k': self.__k,
            'skin': self.__skin,
            'depth': self.__depth,
            'x': self.__x,
            'y': self.__y,
            'angle_a': self.__angle_a,
            'angle_v': self.__angle_v,
            'grid': self.__grid,
            'depth_to_top': self.__depth_to_top,
            'depth_to_bottom': self.__depth_to_bottom,
            'perm_thickness_ovr': self.__perm_thickness_ovr,
            'dfactor': self.__dfactor,
            'rel_perm_method': self.__rel_perm_method,
            'status': self.__status,
        }
        return attribute_dict

    def set_iso_date(self) -> ISODateTime:
        """Convert the stored date (using date_format and start_date) to an ISODateTime."""
        return ISODateTime.convert_to_iso(self.date, self.date_format, self.start_date) | /ressimpy-1.0.3.tar.gz/ressimpy-1.0.3/ResSimpy/Completion.py | 0.893018 | 0.515864 | Completion.py | pypi |
from dataclasses import dataclass
from abc import ABC
from typing import Optional
@dataclass
class RelPermEndPoint(ABC):
    """Container for relative-permeability end-point values.

    All values are optional floats; ``to_dict`` serialises them keyed by their
    short names in declaration order.
    """
    __swl: Optional[float] = None
    __swr: Optional[float] = None
    __swu: Optional[float] = None
    __sgl: Optional[float] = None
    __sgr: Optional[float] = None
    __sgu: Optional[float] = None
    __swro: Optional[float] = None
    __sgro: Optional[float] = None
    __sgrw: Optional[float] = None
    __krw_swro: Optional[float] = None
    __krw_swu: Optional[float] = None
    __krg_sgro: Optional[float] = None
    __krg_sgu: Optional[float] = None
    __kro_swl: Optional[float] = None
    __kro_swr: Optional[float] = None
    __kro_sgl: Optional[float] = None
    __kro_sgr: Optional[float] = None
    __krw_sgl: Optional[float] = None
    __krw_sgr: Optional[float] = None
    __krg_sgrw: Optional[float] = None
    __sgtr: Optional[float] = None
    __sotr: Optional[float] = None

    # Declaration order doubles as the serialisation order used by to_dict().
    _FIELDS = ('swl', 'swr', 'swu', 'sgl', 'sgr', 'sgu', 'swro', 'sgro', 'sgrw',
               'krw_swro', 'krw_swu', 'krg_sgro', 'krg_sgu', 'kro_swl', 'kro_swr',
               'kro_sgl', 'kro_sgr', 'krw_sgl', 'krw_sgr', 'krg_sgrw', 'sgtr', 'sotr')

    def __init__(self, swl: Optional[float] = None, swr: Optional[float] = None, swu: Optional[float] = None,
                 sgl: Optional[float] = None, sgr: Optional[float] = None, sgu: Optional[float] = None,
                 swro: Optional[float] = None, sgro: Optional[float] = None, sgrw: Optional[float] = None,
                 krw_swro: Optional[float] = None, krw_swu: Optional[float] = None, krg_sgro: Optional[float] = None,
                 krg_sgu: Optional[float] = None, kro_swl: Optional[float] = None, kro_swr: Optional[float] = None,
                 kro_sgl: Optional[float] = None, kro_sgr: Optional[float] = None, krw_sgl: Optional[float] = None,
                 krw_sgr: Optional[float] = None, krg_sgrw: Optional[float] = None, sgtr: Optional[float] = None,
                 sotr: Optional[float] = None) -> None:
        """Store each supplied end-point value on the instance."""
        supplied = locals()
        for field_name in self._FIELDS:
            # Attribute names are private (name-mangled), matching the field declarations.
            setattr(self, f'_RelPermEndPoint__{field_name}', supplied[field_name])

    def to_dict(self) -> dict[str, Optional[float]]:
        """Return the end-point values keyed by their short names, in declaration order."""
        return {field_name: getattr(self, f'_RelPermEndPoint__{field_name}')
                for field_name in self._FIELDS}
from abc import ABC
from dataclasses import dataclass
from typing import Optional, Sequence, Union
from ResSimpy.Completion import Completion
from ResSimpy.Enums.UnitsEnum import UnitSystem
@dataclass
class Well(ABC):
    """Base representation of a well: a named collection of completions with a unit system."""
    __completions: list[Completion]
    __well_name: str
    __units: UnitSystem

    def __init__(self, well_name: str, completions: list[Completion], units: UnitSystem) -> None:
        """Store the well name, its completions and the unit system of the well data."""
        self.__well_name = well_name
        self.__completions = completions
        self.__units = units

    @property
    def completions(self) -> list[Completion]:
        """All completions defined for the well."""
        return self.__completions

    @property
    def well_name(self) -> str:
        """Name of the well."""
        return self.__well_name

    @property
    def units(self) -> UnitSystem:
        """Unit system the well data is expressed in."""
        return self.__units

    @property
    def perforations(self) -> Sequence[Completion]:
        """Returns a list of all of the perforations for the well."""
        raise NotImplementedError("This method has not been implemented for this simulator yet")

    @property
    def first_perforation(self) -> Optional[Completion]:
        """Returns the first perforation for the well."""
        raise NotImplementedError("This method has not been implemented for this simulator yet")

    @property
    def shutins(self) -> Sequence[Completion]:
        """Returns a list of all of the shut-ins for the well."""
        raise NotImplementedError("This method has not been implemented for this simulator yet")

    @property
    def last_shutin(self) -> Optional[Completion]:
        """Returns the last shut-in for the well."""
        raise NotImplementedError("This method has not been implemented for this simulator yet")

    @property
    def printable_well_info(self) -> str:
        """Returns some printable well information in string format."""
        raise NotImplementedError("This method has not been implemented for this simulator yet")

    @property
    def completion_events(self) -> list[tuple[str, Union[int, tuple[float, float]]]]:
        """Returns a list of dates and values representing either the layer, or the depths of each perforation."""
        raise NotImplementedError("This method has not been implemented for this simulator yet") | /ressimpy-1.0.3.tar.gz/ressimpy-1.0.3/ResSimpy/Well.py | 0.91388 | 0.325896 | Well.py | pypi |
from typing import Optional
from ResSimpy.Nexus.NexusEnums.DateFormatEnum import DateFormat
from datetime import datetime, timedelta
class ISODateTime(datetime):
    """A class representing an extension of datetime class, returns back date in ISO datetime format."""

    def __repr__(self) -> str:
        """Return the object representation, but formatted in ISO format."""
        # NOTE(review): datetime.__repr__ is constructor-style ("ISODateTime(2021, 3, ...)"),
        # so this substitution also replaces the spaces after the commas — confirm intended.
        basic_string = super().__repr__()
        iso_string = basic_string.replace(' ', 'T')
        return iso_string

    def __str__(self) -> str:
        """Return the string representation, but formatted in ISO format (date'T'time)."""
        basic_string = super().__str__()
        iso_string = basic_string.replace(' ', 'T')
        return iso_string

    @staticmethod
    def isfloat(no_of_days: Optional[str]) -> bool:
        """Return True if no_of_days is a non-None value parseable as a float."""
        if no_of_days is None:
            return False
        try:
            float(no_of_days)
        except ValueError:
            return False
        return True

    @classmethod
    def convert_to_iso(cls, date: str, date_format: 'DateFormat', start_date: Optional[str] = None) -> 'ISODateTime':
        """Convert a simulator date string (or numeric day offset) to an ISODateTime.

        Args:
            date: Either a calendar date string or a numeric number-of-days offset.
            date_format: DateFormat member selecting DD/MM/YYYY or MM/DD/YYYY parsing.
            start_date: Base date the numeric offset is added to; required when date is numeric.

        Raises:
            ValueError: If date_format is missing/unsupported, or date is numeric with no start_date.
        """
        if date_format is None:
            raise ValueError('Please provide date format')
        converted_date: Optional['ISODateTime'] = None
        if ISODateTime.isfloat(date):
            if start_date is None:
                raise ValueError('Please provide start date when date is numeric')
            # Numeric dates are interpreted as a day offset from start_date.
            if date_format == DateFormat.DD_MM_YYYY:
                converted_date = ISODateTime.strptime(start_date, '%d/%m/%Y') + timedelta(days=float(date))
            elif date_format == DateFormat.MM_DD_YYYY:
                converted_date = ISODateTime.strptime(start_date, '%m/%d/%Y') + timedelta(days=float(date))
        elif date_format == DateFormat.DD_MM_YYYY:
            converted_date = ISODateTime.strptime(date, '%d/%m/%Y')
        elif date_format == DateFormat.MM_DD_YYYY:
            converted_date = ISODateTime.strptime(date, '%m/%d/%Y')
        if converted_date is None:
            raise ValueError('Invalid date format or missing start_date.')
        return converted_date
from typing import Any
from ResSimpy.Enums.UnitsEnum import UnitSystem
def to_dict(nexus_object: Any, keys_in_nexus_style: bool = False, add_date: bool = True, add_units: bool = True,
include_nones: bool = True) -> dict[str, None | str | int | float]:
"""Returns a dictionary of the attributes of a Nexus object. Requires a nexus mapping dictionary.
Useful for creating dataframes of objects.
Args:
----
nexus_object (Any): Nexus object with a mapping dictionary defined
keys_in_nexus_style (bool): if True returns the key values in Nexus keywords, otherwise returns the
attribute name as stored by ressimpy
add_date (bool): adds a date attribute if it exists
add_units (bool): adds a units attribute if it exists.
include_nones (bool): If False filters the nones out of the dictionary. Defaults to True
Returns:
-------
a dictionary keyed by attributes and values as the value of the attribute
"""
mapping_dict = nexus_object.get_nexus_mapping()
if keys_in_nexus_style:
result_dict = {x: nexus_object.__getattribute__(y[0]) for x, y in mapping_dict.items()}
else:
result_dict = {y[0]: nexus_object.__getattribute__(y[0]) for y in mapping_dict.values()}
if add_date:
try:
result_dict['date'] = getattr(nexus_object, 'date')
except AttributeError:
raise AttributeError('Date was requested from the object but does not have a date associated with it.'
f'Try setting add_date to False. Full contents of object: {nexus_object}')
if add_units:
try:
unit_sys = getattr(nexus_object, 'unit_system')
except AttributeError:
raise AttributeError(
'Unit system was requested from the object but does not have a unit system associated with it.'
f'Try setting add_units to False. Full contents of the object: {nexus_object}')
if isinstance(unit_sys, UnitSystem):
result_dict['unit_system'] = unit_sys.value
if not include_nones:
result_dict = {k: v for k, v in result_dict.items() if v is not None}
return result_dict | /ressimpy-1.0.3.tar.gz/ressimpy-1.0.3/ResSimpy/Utils/to_dict_generic.py | 0.868241 | 0.307579 | to_dict_generic.py | pypi |
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Union
from uuid import UUID
import pandas as pd
if TYPE_CHECKING:
from ResSimpy.Nexus.DataModels.NexusFile import NexusFile
from ResSimpy.Nexus.DataModels.NexusWaterMethod import NexusWaterParams
# Factory methods for generating empty lists with typing
def get_empty_list_str() -> list[str]:
    """Return a fresh, independent empty list typed for strings."""
    return []
# Factory method for generating empty dictionary with typing
def get_empty_dict_union() -> dict[str, Union[str, int, float, Enum, list[str], pd.DataFrame,
                                              dict[str, Union[float, pd.DataFrame]]]]:
    """Return a fresh, independent empty dict with the broad property-value typing."""
    return {}
# Factory method for generating empty dictionary for eos options
def get_empty_eosopt_dict_union() -> \
        dict[str, Union[str, int, float, pd.DataFrame, list[str], dict[str, float],
                        tuple[str, dict[str, float]], dict[str, pd.DataFrame]]]:
    """Return a fresh, independent empty dict typed for EOS options."""
    return {}
# Factory method for generating empty dictionary for hysteresis parameters
def get_empty_hysteresis_dict() -> dict[str, Union[str, float, dict[str,
                                        Union[str, float, dict[str, Union[str, float]]]]]]:
    """Return a fresh, independent empty dict typed for hysteresis parameters."""
    return {}
def get_empty_list_str_nexus_file() -> list[Union[str, NexusFile]]:
    """Return a fresh, independent empty list typed for strings or NexusFiles."""
    return []
def get_empty_list_nexus_file() -> list[NexusFile]:
    """Return a fresh, independent empty list typed for NexusFiles."""
    return []
def get_empty_dict_int_nexus_file() -> dict[int, NexusFile]:
    """Return a fresh, independent empty dict mapping method numbers to NexusFiles."""
    return {}
def get_empty_dict_uuid_list_int() -> dict[UUID, list[int]]:
    """Return a fresh, independent empty dict mapping UUIDs to line-index lists."""
    return {}
def get_empty_list_nexus_water_params() -> list[NexusWaterParams]:
    """Return a fresh, independent empty list typed for NexusWaterParams."""
    return []
from dataclasses import dataclass
import os
from typing import Optional, MutableMapping
from ResSimpy.Nexus.DataModels.NexusFile import NexusFile
from ResSimpy.Nexus.DataModels.NexusGasliftMethod import NexusGasliftMethod
from ResSimpy.Gaslift import Gaslift
@dataclass(kw_only=True)
class NexusGasliftMethods(Gaslift):
    """Class for collection of Nexus gaslift methods.

    Attributes:
        inputs (dict[int, NexusGasliftMethod]): Collection of Nexus gaslift methods, as a dictionary
        files (dict[int, NexusFile]): Dictionary collection of gaslift files, as defined in Nexus fcs file.
    """
    __inputs: MutableMapping[int, NexusGasliftMethod]
    __files: dict[int, NexusFile]
    __properties_loaded: bool = False  # Used in lazy loading

    def __init__(self, inputs: Optional[MutableMapping[int, NexusGasliftMethod]] = None,
                 files: Optional[dict[int, NexusFile]] = None) -> None:
        """Store the supplied methods/files, defaulting to fresh empty dicts.

        NOTE: truthiness check means an explicitly passed empty dict is replaced
        with a new one, the same as passing None.
        """
        if inputs:
            self.__inputs = inputs
        else:
            self.__inputs: MutableMapping[int, NexusGasliftMethod] = {}
        if files:
            self.__files = files
        else:
            self.__files = {}
        super().__init__()

    def __repr__(self) -> str:
        """Pretty printing gaslift methods."""
        # Trigger the lazy load so the printout reflects file contents.
        if not self.__properties_loaded:
            self.load_gaslift_methods()
        printable_str = ''
        for table_num in self.__inputs.keys():
            printable_str += '\n--------------------------------\n'
            printable_str += f'GASLIFT method {table_num}\n'
            printable_str += '--------------------------------\n'
            printable_str += self.__inputs[table_num].__repr__()
            printable_str += '\n'
        return printable_str

    @property
    def inputs(self) -> MutableMapping[int, NexusGasliftMethod]:
        """Gaslift methods keyed by method number; loads them from file on first access."""
        if not self.__properties_loaded:
            self.load_gaslift_methods()
        return self.__inputs

    @property
    def files(self) -> dict[int, NexusFile]:
        """Gaslift method files keyed by method number."""
        return self.__files

    def load_gaslift_methods(self) -> None:
        """Populate __inputs by reading each gaslift method file from disk."""
        # Read in gaslift properties from Nexus gaslift method files
        if self.__files is not None and len(self.__files) > 0:  # Check if gaslift files exist
            for table_num in self.__files.keys():  # For each gaslift property method
                gaslift_file = self.__files[table_num]
                if gaslift_file.location is None:
                    raise ValueError(f'Unable to find gaslift file: {gaslift_file}')
                # Files missing on disk are silently skipped (no entry created).
                if os.path.isfile(gaslift_file.location):
                    # Create NexusGasliftMethod object
                    self.__inputs[table_num] = NexusGasliftMethod(file=gaslift_file, input_number=table_num)
                    self.__inputs[table_num].read_properties()  # Populate object with gaslift props in file
        # Marked loaded even when some files were skipped, so loading is attempted only once.
        self.__properties_loaded = True | /ressimpy-1.0.3.tar.gz/ressimpy-1.0.3/ResSimpy/Nexus/NexusGasliftMethods.py | 0.833426 | 0.160727 | NexusGasliftMethods.py | pypi |
from dataclasses import dataclass
import os
from typing import Optional, MutableMapping
from ResSimpy.Nexus.DataModels.NexusFile import NexusFile
from ResSimpy.Nexus.DataModels.NexusRelPermMethod import NexusRelPermMethod
from ResSimpy.RelPerm import RelPerm
@dataclass(kw_only=True)
class NexusRelPermMethods(RelPerm):
    """Class for collection of Nexus relative permeability and capillary pressure property inputs.

    Attributes:
        inputs (dict[int, NexusRelPermMethod]): Collection of Nexus relperm property inputs, as a dictionary
        files (dict[int, NexusFile]): Dictionary collection of relperm property files, as defined in Nexus fcs.
    """
    __inputs: MutableMapping[int, NexusRelPermMethod]
    __files: dict[int, NexusFile]
    __properties_loaded: bool = False  # Used in lazy loading

    def __init__(self, inputs: Optional[MutableMapping[int, NexusRelPermMethod]] = None,
                 files: Optional[dict[int, NexusFile]] = None) -> None:
        """Store the supplied methods/files, defaulting to fresh empty dicts.

        NOTE: truthiness check means an explicitly passed empty dict is replaced
        with a new one, the same as passing None.
        """
        if inputs:
            self.__inputs = inputs
        else:
            self.__inputs: MutableMapping[int, NexusRelPermMethod] = {}
        if files:
            self.__files = files
        else:
            self.__files = {}
        super().__init__()

    def __repr__(self) -> str:
        """Pretty printing relative permeability and capillary pressure methods."""
        # Trigger the lazy load so the printout reflects file contents.
        if not self.__properties_loaded:
            self.load_relperm_methods()
        printable_str = ''
        for table_num in self.__inputs.keys():
            printable_str += '\n--------------------------------\n'
            printable_str += f'RELPM method {table_num}\n'
            printable_str += '--------------------------------\n'
            printable_str += self.__inputs[table_num].__repr__()
            printable_str += '\n'
        return printable_str

    @property
    def inputs(self) -> MutableMapping[int, NexusRelPermMethod]:
        """Relperm methods keyed by method number; loads them from file on first access."""
        if not self.__properties_loaded:
            self.load_relperm_methods()
        return self.__inputs

    @property
    def files(self) -> dict[int, NexusFile]:
        """Relperm method files keyed by method number."""
        return self.__files

    def load_relperm_methods(self) -> None:
        """Populate __inputs by reading each relperm method file from disk."""
        # Read in relperm properties from Nexus relperm method files
        if self.__files is not None and len(self.__files) > 0:  # Check if relperm files exist
            for table_num in self.__files.keys():  # For each relperm property method
                relperm_file = self.__files[table_num]
                if relperm_file.location is None:
                    raise ValueError(f'Unable to find relperm file: {relperm_file}')
                # Files missing on disk are silently skipped (no entry created).
                if os.path.isfile(relperm_file.location):
                    # Create NexusRelPermMethod object
                    self.__inputs[table_num] = NexusRelPermMethod(file=relperm_file, input_number=table_num)
                    # Populate object with relperm properties in file
                    self.__inputs[table_num].read_properties()
        # Marked loaded even when some files were skipped, so loading is attempted only once.
        self.__properties_loaded = True | /ressimpy-1.0.3.tar.gz/ressimpy-1.0.3/ResSimpy/Nexus/NexusRelPermMethods.py | 0.841696 | 0.156201 | NexusRelPermMethods.py | pypi |
from dataclasses import dataclass
import os
from typing import Optional, MutableMapping
from ResSimpy.Nexus.DataModels.NexusFile import NexusFile
from ResSimpy.Nexus.DataModels.NexusEquilMethod import NexusEquilMethod
from ResSimpy.Equilibration import Equilibration
@dataclass(kw_only=True)
class NexusEquilMethods(Equilibration):
    """Class for collection of Nexus equilibration methods.

    Attributes:
        inputs (dict[int, NexusEquilMethod]): Collection of Nexus equilibration methods, as a dictionary
        files (dict[int, NexusFile]): Dictionary collection of equilibration files, as defined in Nexus fcs file.
    """
    __inputs: MutableMapping[int, NexusEquilMethod]
    __files: dict[int, NexusFile]
    __properties_loaded: bool = False  # Used in lazy loading

    def __init__(self, inputs: Optional[MutableMapping[int, NexusEquilMethod]] = None,
                 files: Optional[dict[int, NexusFile]] = None) -> None:
        """Store the supplied methods/files, defaulting to fresh empty dicts.

        NOTE: truthiness check means an explicitly passed empty dict is replaced
        with a new one, the same as passing None.
        """
        if inputs:
            self.__inputs = inputs
        else:
            self.__inputs: MutableMapping[int, NexusEquilMethod] = {}
        if files:
            self.__files = files
        else:
            self.__files = {}
        super().__init__()

    def __repr__(self) -> str:
        """Pretty printing equil methods."""
        # Trigger the lazy load so the printout reflects file contents.
        if not self.__properties_loaded:
            self.load_equil_methods()
        printable_str = ''
        for table_num in self.__inputs.keys():
            printable_str += '\n--------------------------------\n'
            printable_str += f'EQUIL method {table_num}\n'
            printable_str += '--------------------------------\n'
            printable_str += self.__inputs[table_num].__repr__()
            printable_str += '\n'
        return printable_str

    @property
    def inputs(self) -> MutableMapping[int, NexusEquilMethod]:
        """Equil methods keyed by method number; loads them from file on first access."""
        if not self.__properties_loaded:
            self.load_equil_methods()
        return self.__inputs

    @property
    def files(self) -> dict[int, NexusFile]:
        """Equil method files keyed by method number."""
        return self.__files

    def load_equil_methods(self) -> None:
        """Populate __inputs by reading each equil method file from disk."""
        # Read in equil properties from Nexus equil method files
        if self.__files is not None and len(self.__files) > 0:  # Check if equil files exist
            for table_num in self.__files.keys():  # For each equil property method
                equil_file = self.__files[table_num]
                if equil_file.location is None:
                    raise ValueError(f'Unable to find equil file: {equil_file}')
                # Files missing on disk are silently skipped (no entry created).
                if os.path.isfile(equil_file.location):
                    # Create NexusEquilMethod object
                    self.__inputs[table_num] = NexusEquilMethod(file=equil_file, input_number=table_num)
                    self.__inputs[table_num].read_properties()  # Populate object with equil properties in file
        # Marked loaded even when some files were skipped, so loading is attempted only once.
        self.__properties_loaded = True | /ressimpy-1.0.3.tar.gz/ressimpy-1.0.3/ResSimpy/Nexus/NexusEquilMethods.py | 0.836354 | 0.186576 | NexusEquilMethods.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.